From e484028434d138593983ce8998fd5b4f5ce1c140 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=B4me=20Schneider?= Date: Fri, 31 Oct 2014 17:36:08 +0100 Subject: [PATCH] Import from kombu from tarball --- AUTHORS | 118 + Changelog | 3012 +++++++++++++++++ FAQ | 16 + INSTALL | 21 + LICENSE | 26 + MANIFEST.in | 17 + PKG-INFO | 355 ++ README.rst | 327 ++ THANKS | 32 + TODO | 2 + docs/.static/.keep | 0 docs/.templates/sidebarintro.html | 7 + docs/.templates/sidebarlogo.html | 3 + docs/Makefile | 75 + docs/_ext/applyxrefs.py | 90 + docs/_ext/literals_to_xrefs.py | 180 + docs/_theme/celery/static/celery.css_t | 394 +++ docs/_theme/celery/theme.conf | 5 + docs/changelog.rst | 3012 +++++++++++++++++ docs/conf.py | 75 + docs/faq.rst | 16 + docs/images/kombu.jpg | Bin 0 -> 115763 bytes docs/images/kombusmall.jpg | Bin 0 -> 28752 bytes docs/index.rst | 26 + docs/introduction.rst | 327 ++ docs/reference/index.rst | 67 + docs/reference/kombu.abstract.rst | 10 + docs/reference/kombu.async.debug.rst | 11 + docs/reference/kombu.async.hub.rst | 11 + docs/reference/kombu.async.rst | 11 + docs/reference/kombu.async.semaphore.rst | 11 + docs/reference/kombu.async.timer.rst | 11 + docs/reference/kombu.clocks.rst | 11 + docs/reference/kombu.common.rst | 11 + docs/reference/kombu.compat.rst | 36 + docs/reference/kombu.compression.rst | 20 + docs/reference/kombu.connection.rst | 40 + docs/reference/kombu.exceptions.rst | 14 + docs/reference/kombu.five.rst | 11 + docs/reference/kombu.log.rst | 11 + docs/reference/kombu.message.rst | 11 + docs/reference/kombu.mixins.rst | 11 + docs/reference/kombu.pidbox.rst | 89 + docs/reference/kombu.pools.rst | 11 + docs/reference/kombu.rst | 187 + docs/reference/kombu.serialization.rst | 47 + docs/reference/kombu.simple.rst | 89 + docs/reference/kombu.syn.rst | 11 + docs/reference/kombu.transport.SLMQ.rst | 24 + docs/reference/kombu.transport.SQS.rst | 20 + docs/reference/kombu.transport.amqplib.rst | 36 + docs/reference/kombu.transport.base.rst | 62 + docs/reference/kombu.transport.beanstalk.rst | 20 + docs/reference/kombu.transport.couchdb.rst | 25 + ...nagement.commands.clean_kombu_messages.rst | 14 + .../kombu.transport.django.managers.rst | 11 + .../kombu.transport.django.models.rst | 11 + docs/reference/kombu.transport.django.rst | 24 + docs/reference/kombu.transport.filesystem.rst | 21 + .../reference/kombu.transport.librabbitmq.rst | 35 + docs/reference/kombu.transport.memory.rst | 20 + docs/reference/kombu.transport.mongodb.rst | 20 + docs/reference/kombu.transport.pyamqp.rst | 36 + docs/reference/kombu.transport.pyro.rst | 20 + docs/reference/kombu.transport.redis.rst | 20 + docs/reference/kombu.transport.rst | 23 + .../kombu.transport.sqlalchemy.models.rst | 27 + docs/reference/kombu.transport.sqlalchemy.rst | 25 + .../kombu.transport.virtual.exchange.rst | 35 + docs/reference/kombu.transport.virtual.rst | 117 + .../kombu.transport.virtual.scheduling.rst | 7 + docs/reference/kombu.transport.zmq.rst | 13 + docs/reference/kombu.transport.zookeeper.rst | 25 + docs/reference/kombu.utils.amq_manager.rst | 11 + docs/reference/kombu.utils.compat.rst | 11 + docs/reference/kombu.utils.debug.rst | 11 + docs/reference/kombu.utils.encoding.rst | 11 + docs/reference/kombu.utils.eventio.rst | 11 + docs/reference/kombu.utils.functional.rst | 11 + docs/reference/kombu.utils.limits.rst | 11 + docs/reference/kombu.utils.rst | 11 + docs/reference/kombu.utils.text.rst | 11 + docs/reference/kombu.utils.url.rst | 11 + docs/userguide/connections.rst | 178 + docs/userguide/consumers.rst 
| 103 + docs/userguide/examples.rst | 57 + docs/userguide/index.rst | 18 + docs/userguide/introduction.rst | 100 + docs/userguide/pools.rst | 175 + docs/userguide/producers.rst | 24 + docs/userguide/serialization.rst | 184 + docs/userguide/simple.rst | 116 + examples/complete_receive.py | 41 + examples/complete_send.py | 30 + examples/experimental/async_consume.py | 29 + examples/hello_consumer.py | 8 + examples/hello_publisher.py | 9 + examples/simple_eventlet_receive.py | 39 + examples/simple_eventlet_send.py | 40 + examples/simple_receive.py | 26 + examples/simple_send.py | 29 + examples/simple_task_queue/__init__.py | 0 examples/simple_task_queue/client.py | 28 + examples/simple_task_queue/queues.py | 6 + examples/simple_task_queue/tasks.py | 2 + examples/simple_task_queue/worker.py | 42 + extra/doc2ghpages | 13 + extra/release/bump_version.py | 166 + extra/release/doc4allmods | 38 + extra/release/flakeplus.py | 125 + extra/release/removepyc.sh | 3 + extra/release/verify-reference-index.sh | 19 + funtests/__init__.py | 5 + funtests/setup.cfg | 4 + funtests/setup.py | 67 + funtests/tests/__init__.py | 7 + funtests/tests/test_SLMQ.py | 29 + funtests/tests/test_SQS.py | 28 + funtests/tests/test_amqp.py | 6 + funtests/tests/test_amqplib.py | 14 + funtests/tests/test_beanstalk.py | 19 + funtests/tests/test_couchdb.py | 18 + funtests/tests/test_django.py | 37 + funtests/tests/test_librabbitmq.py | 14 + funtests/tests/test_mongodb.py | 80 + funtests/tests/test_pyamqp.py | 6 + funtests/tests/test_redis.py | 22 + funtests/tests/test_sqla.py | 16 + funtests/tests/test_zookeeper.py | 18 + funtests/transport.py | 313 ++ kombu.egg-info/PKG-INFO | 355 ++ kombu.egg-info/SOURCES.txt | 266 ++ kombu.egg-info/dependency_links.txt | 1 + kombu.egg-info/not-zip-safe | 1 + kombu.egg-info/requires.txt | 45 + kombu.egg-info/top_level.txt | 1 + kombu/__init__.py | 108 + kombu/abstract.py | 116 + kombu/async/__init__.py | 15 + kombu/async/debug.py | 60 + kombu/async/hub.py | 350 ++ kombu/async/semaphore.py | 110 + kombu/async/timer.py | 232 ++ kombu/clocks.py | 148 + kombu/common.py | 398 +++ kombu/compat.py | 215 ++ kombu/compression.py | 83 + kombu/connection.py | 1059 ++++++ kombu/entity.py | 718 ++++ kombu/exceptions.py | 83 + kombu/five.py | 203 ++ kombu/log.py | 147 + kombu/message.py | 154 + kombu/messaging.py | 602 ++++ kombu/mixins.py | 251 ++ kombu/pidbox.py | 364 ++ kombu/pools.py | 153 + kombu/serialization.py | 455 +++ kombu/simple.py | 137 + kombu/syn.py | 53 + kombu/tests/__init__.py | 91 + kombu/tests/async/__init__.py | 0 kombu/tests/async/test_hub.py | 33 + kombu/tests/async/test_semaphore.py | 45 + kombu/tests/case.py | 191 ++ kombu/tests/mocks.py | 148 + kombu/tests/test_clocks.py | 104 + kombu/tests/test_common.py | 416 +++ kombu/tests/test_compat.py | 331 ++ kombu/tests/test_compression.py | 50 + kombu/tests/test_connection.py | 688 ++++ kombu/tests/test_entities.py | 366 ++ kombu/tests/test_log.py | 165 + kombu/tests/test_messaging.py | 611 ++++ kombu/tests/test_mixins.py | 239 ++ kombu/tests/test_pidbox.py | 287 ++ kombu/tests/test_pools.py | 239 ++ kombu/tests/test_serialization.py | 348 ++ kombu/tests/test_simple.py | 136 + kombu/tests/test_syn.py | 61 + kombu/tests/transport/__init__.py | 0 kombu/tests/transport/test_SQS.py | 296 ++ kombu/tests/transport/test_amqplib.py | 162 + kombu/tests/transport/test_base.py | 148 + kombu/tests/transport/test_filesystem.py | 123 + kombu/tests/transport/test_librabbitmq.py | 150 + kombu/tests/transport/test_memory.py | 157 + 
kombu/tests/transport/test_mongodb.py | 120 + kombu/tests/transport/test_pyamqp.py | 179 + kombu/tests/transport/test_redis.py | 1237 +++++++ kombu/tests/transport/test_sqlalchemy.py | 69 + kombu/tests/transport/test_transport.py | 44 + kombu/tests/transport/virtual/__init__.py | 0 kombu/tests/transport/virtual/test_base.py | 540 +++ .../tests/transport/virtual/test_exchange.py | 161 + .../transport/virtual/test_scheduling.py | 67 + kombu/tests/utils/__init__.py | 0 kombu/tests/utils/test_amq_manager.py | 36 + kombu/tests/utils/test_debug.py | 56 + kombu/tests/utils/test_encoding.py | 102 + kombu/tests/utils/test_functional.py | 63 + kombu/tests/utils/test_utils.py | 412 +++ kombu/transport/SLMQ.py | 186 + kombu/transport/SQS.py | 539 +++ kombu/transport/__init__.py | 109 + kombu/transport/amqplib.py | 402 +++ kombu/transport/base.py | 173 + kombu/transport/beanstalk.py | 155 + kombu/transport/couchdb.py | 142 + kombu/transport/django/__init__.py | 68 + kombu/transport/django/management/__init__.py | 0 .../django/management/commands/__init__.py | 0 .../commands/clean_kombu_messages.py | 22 + kombu/transport/django/managers.py | 86 + .../django/migrations/0001_initial.py | 57 + kombu/transport/django/migrations/__init__.py | 0 kombu/transport/django/models.py | 32 + kombu/transport/filesystem.py | 193 ++ kombu/transport/librabbitmq.py | 173 + kombu/transport/memory.py | 77 + kombu/transport/mongodb.py | 314 ++ kombu/transport/pyamqp.py | 146 + kombu/transport/pyro.py | 99 + kombu/transport/redis.py | 957 ++++++ kombu/transport/sqlalchemy/__init__.py | 160 + kombu/transport/sqlalchemy/models.py | 62 + kombu/transport/virtual/__init__.py | 854 +++++ kombu/transport/virtual/exchange.py | 134 + kombu/transport/virtual/scheduling.py | 49 + kombu/transport/zmq.py | 314 ++ kombu/transport/zookeeper.py | 188 + kombu/utils/__init__.py | 450 +++ kombu/utils/amq_manager.py | 18 + kombu/utils/compat.py | 60 + kombu/utils/debug.py | 65 + kombu/utils/encoding.py | 129 + kombu/utils/eventio.py | 265 ++ kombu/utils/functional.py | 82 + kombu/utils/limits.py | 68 + kombu/utils/text.py | 47 + kombu/utils/url.py | 64 + requirements/default.txt | 2 + requirements/dev.txt | 1 + requirements/docs.txt | 3 + requirements/extras/beanstalk.txt | 1 + requirements/extras/couchdb.txt | 1 + requirements/extras/kazoo.txt | 1 + requirements/extras/librabbitmq.txt | 1 + requirements/extras/mongodb.txt | 1 + requirements/extras/msgpack.txt | 1 + requirements/extras/pyro.txt | 1 + requirements/extras/redis.txt | 1 + requirements/extras/slmq.txt | 1 + requirements/extras/sqlalchemy.txt | 1 + requirements/extras/sqs.txt | 1 + requirements/extras/yaml.txt | 1 + requirements/extras/zeromq.txt | 1 + requirements/extras/zookeeper.txt | 1 + requirements/funtest.txt | 24 + requirements/pkgutils.txt | 3 + requirements/py26.txt | 2 + requirements/test-ci.txt | 6 + requirements/test-ci3.txt | 5 + requirements/test.txt | 3 + requirements/test3.txt | 3 + setup.cfg | 34 + setup.py | 177 + 267 files changed, 34800 insertions(+) create mode 100644 AUTHORS create mode 100644 Changelog create mode 100644 FAQ create mode 100644 INSTALL create mode 100644 LICENSE create mode 100644 MANIFEST.in create mode 100644 PKG-INFO create mode 100644 README.rst create mode 100644 THANKS create mode 100644 TODO create mode 100644 docs/.static/.keep create mode 100644 docs/.templates/sidebarintro.html create mode 100644 docs/.templates/sidebarlogo.html create mode 100644 docs/Makefile create mode 100644 docs/_ext/applyxrefs.py create mode 100644 
docs/_ext/literals_to_xrefs.py create mode 100644 docs/_theme/celery/static/celery.css_t create mode 100644 docs/_theme/celery/theme.conf create mode 100644 docs/changelog.rst create mode 100644 docs/conf.py create mode 100644 docs/faq.rst create mode 100644 docs/images/kombu.jpg create mode 100644 docs/images/kombusmall.jpg create mode 100644 docs/index.rst create mode 100644 docs/introduction.rst create mode 100644 docs/reference/index.rst create mode 100644 docs/reference/kombu.abstract.rst create mode 100644 docs/reference/kombu.async.debug.rst create mode 100644 docs/reference/kombu.async.hub.rst create mode 100644 docs/reference/kombu.async.rst create mode 100644 docs/reference/kombu.async.semaphore.rst create mode 100644 docs/reference/kombu.async.timer.rst create mode 100644 docs/reference/kombu.clocks.rst create mode 100644 docs/reference/kombu.common.rst create mode 100644 docs/reference/kombu.compat.rst create mode 100644 docs/reference/kombu.compression.rst create mode 100644 docs/reference/kombu.connection.rst create mode 100644 docs/reference/kombu.exceptions.rst create mode 100644 docs/reference/kombu.five.rst create mode 100644 docs/reference/kombu.log.rst create mode 100644 docs/reference/kombu.message.rst create mode 100644 docs/reference/kombu.mixins.rst create mode 100644 docs/reference/kombu.pidbox.rst create mode 100644 docs/reference/kombu.pools.rst create mode 100644 docs/reference/kombu.rst create mode 100644 docs/reference/kombu.serialization.rst create mode 100644 docs/reference/kombu.simple.rst create mode 100644 docs/reference/kombu.syn.rst create mode 100644 docs/reference/kombu.transport.SLMQ.rst create mode 100644 docs/reference/kombu.transport.SQS.rst create mode 100644 docs/reference/kombu.transport.amqplib.rst create mode 100644 docs/reference/kombu.transport.base.rst create mode 100644 docs/reference/kombu.transport.beanstalk.rst create mode 100644 docs/reference/kombu.transport.couchdb.rst create mode 100644 docs/reference/kombu.transport.django.management.commands.clean_kombu_messages.rst create mode 100644 docs/reference/kombu.transport.django.managers.rst create mode 100644 docs/reference/kombu.transport.django.models.rst create mode 100644 docs/reference/kombu.transport.django.rst create mode 100644 docs/reference/kombu.transport.filesystem.rst create mode 100644 docs/reference/kombu.transport.librabbitmq.rst create mode 100644 docs/reference/kombu.transport.memory.rst create mode 100644 docs/reference/kombu.transport.mongodb.rst create mode 100644 docs/reference/kombu.transport.pyamqp.rst create mode 100644 docs/reference/kombu.transport.pyro.rst create mode 100644 docs/reference/kombu.transport.redis.rst create mode 100644 docs/reference/kombu.transport.rst create mode 100644 docs/reference/kombu.transport.sqlalchemy.models.rst create mode 100644 docs/reference/kombu.transport.sqlalchemy.rst create mode 100644 docs/reference/kombu.transport.virtual.exchange.rst create mode 100644 docs/reference/kombu.transport.virtual.rst create mode 100644 docs/reference/kombu.transport.virtual.scheduling.rst create mode 100644 docs/reference/kombu.transport.zmq.rst create mode 100644 docs/reference/kombu.transport.zookeeper.rst create mode 100644 docs/reference/kombu.utils.amq_manager.rst create mode 100644 docs/reference/kombu.utils.compat.rst create mode 100644 docs/reference/kombu.utils.debug.rst create mode 100644 docs/reference/kombu.utils.encoding.rst create mode 100644 docs/reference/kombu.utils.eventio.rst create mode 100644 
docs/reference/kombu.utils.functional.rst create mode 100644 docs/reference/kombu.utils.limits.rst create mode 100644 docs/reference/kombu.utils.rst create mode 100644 docs/reference/kombu.utils.text.rst create mode 100644 docs/reference/kombu.utils.url.rst create mode 100644 docs/userguide/connections.rst create mode 100644 docs/userguide/consumers.rst create mode 100644 docs/userguide/examples.rst create mode 100644 docs/userguide/index.rst create mode 100644 docs/userguide/introduction.rst create mode 100644 docs/userguide/pools.rst create mode 100644 docs/userguide/producers.rst create mode 100644 docs/userguide/serialization.rst create mode 100644 docs/userguide/simple.rst create mode 100644 examples/complete_receive.py create mode 100644 examples/complete_send.py create mode 100644 examples/experimental/async_consume.py create mode 100644 examples/hello_consumer.py create mode 100644 examples/hello_publisher.py create mode 100644 examples/simple_eventlet_receive.py create mode 100644 examples/simple_eventlet_send.py create mode 100644 examples/simple_receive.py create mode 100644 examples/simple_send.py create mode 100644 examples/simple_task_queue/__init__.py create mode 100644 examples/simple_task_queue/client.py create mode 100644 examples/simple_task_queue/queues.py create mode 100644 examples/simple_task_queue/tasks.py create mode 100644 examples/simple_task_queue/worker.py create mode 100755 extra/doc2ghpages create mode 100755 extra/release/bump_version.py create mode 100755 extra/release/doc4allmods create mode 100755 extra/release/flakeplus.py create mode 100755 extra/release/removepyc.sh create mode 100755 extra/release/verify-reference-index.sh create mode 100644 funtests/__init__.py create mode 100644 funtests/setup.cfg create mode 100644 funtests/setup.py create mode 100644 funtests/tests/__init__.py create mode 100644 funtests/tests/test_SLMQ.py create mode 100644 funtests/tests/test_SQS.py create mode 100644 funtests/tests/test_amqp.py create mode 100644 funtests/tests/test_amqplib.py create mode 100644 funtests/tests/test_beanstalk.py create mode 100644 funtests/tests/test_couchdb.py create mode 100644 funtests/tests/test_django.py create mode 100644 funtests/tests/test_librabbitmq.py create mode 100644 funtests/tests/test_mongodb.py create mode 100644 funtests/tests/test_pyamqp.py create mode 100644 funtests/tests/test_redis.py create mode 100644 funtests/tests/test_sqla.py create mode 100644 funtests/tests/test_zookeeper.py create mode 100644 funtests/transport.py create mode 100644 kombu.egg-info/PKG-INFO create mode 100644 kombu.egg-info/SOURCES.txt create mode 100644 kombu.egg-info/dependency_links.txt create mode 100644 kombu.egg-info/not-zip-safe create mode 100644 kombu.egg-info/requires.txt create mode 100644 kombu.egg-info/top_level.txt create mode 100644 kombu/__init__.py create mode 100644 kombu/abstract.py create mode 100644 kombu/async/__init__.py create mode 100644 kombu/async/debug.py create mode 100644 kombu/async/hub.py create mode 100644 kombu/async/semaphore.py create mode 100644 kombu/async/timer.py create mode 100644 kombu/clocks.py create mode 100644 kombu/common.py create mode 100644 kombu/compat.py create mode 100644 kombu/compression.py create mode 100644 kombu/connection.py create mode 100644 kombu/entity.py create mode 100644 kombu/exceptions.py create mode 100644 kombu/five.py create mode 100644 kombu/log.py create mode 100644 kombu/message.py create mode 100644 kombu/messaging.py create mode 100644 kombu/mixins.py create mode 100644 
kombu/pidbox.py create mode 100644 kombu/pools.py create mode 100644 kombu/serialization.py create mode 100644 kombu/simple.py create mode 100644 kombu/syn.py create mode 100644 kombu/tests/__init__.py create mode 100644 kombu/tests/async/__init__.py create mode 100644 kombu/tests/async/test_hub.py create mode 100644 kombu/tests/async/test_semaphore.py create mode 100644 kombu/tests/case.py create mode 100644 kombu/tests/mocks.py create mode 100644 kombu/tests/test_clocks.py create mode 100644 kombu/tests/test_common.py create mode 100644 kombu/tests/test_compat.py create mode 100644 kombu/tests/test_compression.py create mode 100644 kombu/tests/test_connection.py create mode 100644 kombu/tests/test_entities.py create mode 100644 kombu/tests/test_log.py create mode 100644 kombu/tests/test_messaging.py create mode 100644 kombu/tests/test_mixins.py create mode 100644 kombu/tests/test_pidbox.py create mode 100644 kombu/tests/test_pools.py create mode 100644 kombu/tests/test_serialization.py create mode 100644 kombu/tests/test_simple.py create mode 100644 kombu/tests/test_syn.py create mode 100644 kombu/tests/transport/__init__.py create mode 100644 kombu/tests/transport/test_SQS.py create mode 100644 kombu/tests/transport/test_amqplib.py create mode 100644 kombu/tests/transport/test_base.py create mode 100644 kombu/tests/transport/test_filesystem.py create mode 100644 kombu/tests/transport/test_librabbitmq.py create mode 100644 kombu/tests/transport/test_memory.py create mode 100644 kombu/tests/transport/test_mongodb.py create mode 100644 kombu/tests/transport/test_pyamqp.py create mode 100644 kombu/tests/transport/test_redis.py create mode 100644 kombu/tests/transport/test_sqlalchemy.py create mode 100644 kombu/tests/transport/test_transport.py create mode 100644 kombu/tests/transport/virtual/__init__.py create mode 100644 kombu/tests/transport/virtual/test_base.py create mode 100644 kombu/tests/transport/virtual/test_exchange.py create mode 100644 kombu/tests/transport/virtual/test_scheduling.py create mode 100644 kombu/tests/utils/__init__.py create mode 100644 kombu/tests/utils/test_amq_manager.py create mode 100644 kombu/tests/utils/test_debug.py create mode 100644 kombu/tests/utils/test_encoding.py create mode 100644 kombu/tests/utils/test_functional.py create mode 100644 kombu/tests/utils/test_utils.py create mode 100644 kombu/transport/SLMQ.py create mode 100644 kombu/transport/SQS.py create mode 100644 kombu/transport/__init__.py create mode 100644 kombu/transport/amqplib.py create mode 100644 kombu/transport/base.py create mode 100644 kombu/transport/beanstalk.py create mode 100644 kombu/transport/couchdb.py create mode 100644 kombu/transport/django/__init__.py create mode 100644 kombu/transport/django/management/__init__.py create mode 100644 kombu/transport/django/management/commands/__init__.py create mode 100644 kombu/transport/django/management/commands/clean_kombu_messages.py create mode 100644 kombu/transport/django/managers.py create mode 100644 kombu/transport/django/migrations/0001_initial.py create mode 100644 kombu/transport/django/migrations/__init__.py create mode 100644 kombu/transport/django/models.py create mode 100644 kombu/transport/filesystem.py create mode 100644 kombu/transport/librabbitmq.py create mode 100644 kombu/transport/memory.py create mode 100644 kombu/transport/mongodb.py create mode 100644 kombu/transport/pyamqp.py create mode 100644 kombu/transport/pyro.py create mode 100644 kombu/transport/redis.py create mode 100644 
kombu/transport/sqlalchemy/__init__.py create mode 100644 kombu/transport/sqlalchemy/models.py create mode 100644 kombu/transport/virtual/__init__.py create mode 100644 kombu/transport/virtual/exchange.py create mode 100644 kombu/transport/virtual/scheduling.py create mode 100644 kombu/transport/zmq.py create mode 100644 kombu/transport/zookeeper.py create mode 100644 kombu/utils/__init__.py create mode 100644 kombu/utils/amq_manager.py create mode 100644 kombu/utils/compat.py create mode 100644 kombu/utils/debug.py create mode 100644 kombu/utils/encoding.py create mode 100644 kombu/utils/eventio.py create mode 100644 kombu/utils/functional.py create mode 100644 kombu/utils/limits.py create mode 100644 kombu/utils/text.py create mode 100644 kombu/utils/url.py create mode 100644 requirements/default.txt create mode 100644 requirements/dev.txt create mode 100644 requirements/docs.txt create mode 100644 requirements/extras/beanstalk.txt create mode 100644 requirements/extras/couchdb.txt create mode 100644 requirements/extras/kazoo.txt create mode 100644 requirements/extras/librabbitmq.txt create mode 100644 requirements/extras/mongodb.txt create mode 100644 requirements/extras/msgpack.txt create mode 100644 requirements/extras/pyro.txt create mode 100644 requirements/extras/redis.txt create mode 100644 requirements/extras/slmq.txt create mode 100644 requirements/extras/sqlalchemy.txt create mode 100644 requirements/extras/sqs.txt create mode 100644 requirements/extras/yaml.txt create mode 100644 requirements/extras/zeromq.txt create mode 100644 requirements/extras/zookeeper.txt create mode 100644 requirements/funtest.txt create mode 100644 requirements/pkgutils.txt create mode 100644 requirements/py26.txt create mode 100644 requirements/test-ci.txt create mode 100644 requirements/test-ci3.txt create mode 100644 requirements/test.txt create mode 100644 requirements/test3.txt create mode 100644 setup.cfg create mode 100644 setup.py diff --git a/AUTHORS b/AUTHORS new file mode 100644 index 0000000..52f216e --- /dev/null +++ b/AUTHORS @@ -0,0 +1,118 @@ +========= + AUTHORS +========= +:order: sorted + +Adam Gaca +Adam Nelson +Adam Wentz +Alex Koshelev +Alexandre Bourget +Andrew Watts +Andrey Antukh +Andrii Kostenko +Andy McCurdy +Antoine Legrand +Anton Gyllenberg +Ask Solem +Basil Mironenko +Bobby Beever +Brian Bernstein +C Anthony Risinger +Christophe Chauvet +Christopher Grebs +Clay Gerrard +Corentin Ardeois +Dan LaMotte +Dan McGee +Dane Guempel +Davanum Srinivas +David Clymer +David Gelvin +David Strauss +David Ziegler +Dhananjay Nene +Dmitry Malinovsky +Dustin J. Mitchell +Ephemera +Eric Reynolds +Fabrice Rabaute +Felix Schwarz +Fernando Jorge Mota +Flavio [FlaPer87] Percoco Premoli +Florian Munz +Franck Cuny +Germán M. Bravo +Gregory Haskins +Hong Minhee +Ian Eure +Ian Struble +Ionel Maries Cristian +James Saryerwinnie +James Turk +Jason Cater +Jasper Bryant-Greene +Jeff Balogh +Jesper Thomschütz +John Shuping +John Spray +John Watson +Jonathan Halcrow +Joseph Crosland +Keith Fitzgerald +Kevin McCarthy +Kevin McDonald +Latitia M. 
Haskins +Len Buckens +Mahendra M +Marcin Lulek (ergo) +Mark Lavin +Matt Wise +Maxime Rouyrre +Mher Movsisyan +Michael Barrett +Michael Nelson +Nitzan Miron +Noah Kantrowitz +Ollie Walsh +Pascal Hartig +Patrick Schneider +Paul McLanahan +Petar Radosevic +Peter Hoffmann +Pierre Riteau +Rafael Duran Castaneda +Rafal Malinowski +Ralf Nyren +Randy Barlow +Rob Ottaway +Roger Hu +Rumyana Neykova +Rune Halvorsen +Ryan Petrello +Sam Stavinoha +Sascha Peilicke +Scott Lyons +Sean Bleier +Sean Creeley +Seb Insua +Shane Caraveo +Steeve Morin +Stefan Eletzhofer +Stephan Jaekel +Stephen Day +Tareque Hossain +Thomas Johansson +Tobias Schottdorf +Tomaž Muraus +Tommie McAfee +Travis Cline +Travis Swicegood +Victor Garcia +Viet Hung Nguyen +Vince Gonzalez +Vincent Driessen +Zach Smith +Zhao Xiaohong +haridsv +iSlava diff --git a/Changelog b/Changelog new file mode 100644 index 0000000..28c6107 --- /dev/null +++ b/Changelog @@ -0,0 +1,3012 @@ +.. _changelog: + +================ + Change history +================ + +.. _version-3.0.21: + +3.0.21 +====== +:release-date: 2014-07-07 02:00 P.M UTC +:release-by: Ask Solem + +- Fixed remaining bug in ``maybe_declare`` for ``auto_delete`` exchanges. + + Fix contributed by Roger Hu. + +- MongoDB: Creating a channel now properly evaluates a connection (Issue #363). + + Fix contributed by Len Buckens. + +.. _version-3.0.20: + +3.0.20 +====== +:release-date: 2014-06-24 02:30 P.M UTC +:release-by: Ask Solem + +- Reverts change in 3.0.17 where ``maybe_declare`` caches the declaration + of auto_delete queues and exchanges. + + Fix contributed by Roger Hu. + +- Redis: Fixed race condition when using gevent and the channel is closed. + + Fix contributed by Andrew Rodionoff. + +.. _version-3.0.19: + +3.0.19 +====== +:release-date: 2014-06-09 03:10 P.M UTC +:release-by: Ask Solem + +- The wheel distribution did not support Python 2.6, as it failed to list + the extra dependencies required. + +- Durable and auto_delete queues/exchanges can be cached using + ``maybe_declare``. + +.. _version-3.0.18: + +3.0.18 +====== +:release-date: 2014-06-02 06:00 P.M UTC +:release-by: Ask Solem + +- A typo introduced in 3.0.17 caused kombu.async.hub to crash (Issue #360). + +.. _version-3.0.17: + +3.0.17 +====== +:release-date: 2014-06-02 05:00 P.M UTC +:release-by: Ask Solem + +- ``kombu[librabbitmq]`` now depends on librabbitmq 1.5.2. + +- Async: Event loop now selectively removes file descriptors for the mode + it failed in, and keeps others (e.g. read vs write). + + Fix contributed by Roger Hu. + +- CouchDB: Now works without userid set. + + Fix contributed by Latitia M. Haskins. + +- SQLAlchemy: Now supports recovery from connection errors. + + Contributed by Felix Schwarz. + +- Redis: Restore at shutdown now works when ack emulation is disabled. + +- :func:`kombu.common.eventloop` accidentally swallowed socket errors. + +- Adds :func:`kombu.utils.url.sanitize_url`. + +.. _version-3.0.16: + +3.0.16 +====== +:release-date: 2014-05-06 01:00 P.M UTC +:release-by: Ask Solem + +- ``kombu[librabbitmq]`` now depends on librabbitmq 1.5.1. + +- Redis: Fixes ``TypeError`` problem in ``unregister`` (Issue #342). + + Fix contributed by Tobias Schottdorf. + +- Tests: Some unit tests accidentally required the `redis-py` library. + + Fix contributed by Randy Barlow. + +- librabbitmq: Would crash when using an older version of :mod:`librabbitmq`, + now emits a warning instead. + +..
_version-3.0.15: + +3.0.15 +====== +:release-date: 2014-04-15 09:00 P.M UTC +:release-by: Ask Solem + +- Now depends on :mod:`amqp` 1.4.5. + +- RabbitMQ 3.3 changes QoS semantics (Issue #339). + + See the RabbitMQ release notes here: + http://www.rabbitmq.com/blog/2014/04/02/breaking-things-with-rabbitmq-3-3/ + + A new connection property has been added that can be used to detect + whether the remote server is using this new QoS behavior:: + + >>> Connection('amqp://').qos_behavior_matches_spec + False + + so if your application depends on the old semantics you can + use this to set the ``apply_global`` flag appropriately:: + + def update_prefetch_count(channel, new_value): + channel.basic_qos( + 0, new_value, + not channel.connection.client.qos_behavior_matches_spec, + ) + +- Users of :mod:`librabbitmq` are encouraged to upgrade to librabbitmq 1.5.0. + + The ``kombu[librabbitmq]`` extra has been updated to depend on this + version. + +- Pools: Now takes transport options into account when comparing connections + (Issue #333). + +- MongoDB: Fixes Python 3 compatibility. + +- Async: select: Ignore socket errors when attempting to unregister handles + from the loop. + +- Pidbox: Can now be configured to use a serializer other than json, + by specifying a serializer argument to :class:`~kombu.pidbox.Mailbox`. + + Contributed by Dmitry Malinovsky. + +- Message decompression now works with Python 3. + + Fix contributed by Adam Gaca. + +.. _version-3.0.14: + +3.0.14 +====== +:release-date: 2014-03-19 07:00 P.M UTC +:release-by: Ask Solem + +- **MongoDB**: Now endures a connection failover (Issue #123). + + Fix contributed by Alex Koshelev. + +- **MongoDB**: Fixed ``KeyError`` when a replica set member is removed. + + Also fixes celery#971 and celery/#898. + + Fix contributed by Alex Koshelev. + +- **MongoDB**: Fixed MongoDB broadcast cursor re-initialization bug. + + Fix contributed by Alex Koshelev. + +- **Async**: Fixed bug in lax semaphore implementation where in + some usage patterns the limit was not honored correctly. + + Fix contributed by Ionel Cristian Mărieș. + +- **Redis**: Fixed problem with fanout when using Python 3 (Issue #324). + +- **Redis**: Fixed ``AttributeError`` from attempting to close a non-existing + connection (Issue #320). + +.. _version-3.0.13: + +3.0.13 +====== +:release-date: 2014-03-03 04:00 P.M UTC +:release-by: Ask Solem + +- Redis: Fixed serious race condition that could lead to data loss. + + The delivery tags were accidentally set to be an incremental number + local to the channel, but the delivery tags need to be globally + unique so that a message cannot overwrite an older message + in the backup store. + + This change is backwards compatible, and you are encouraged + to update all your systems using a previous version as soon as possible. + +- Now depends on :mod:`amqp` 1.4.4. + +- Pidbox: Now makes sure message encoding errors are handled by default, + so that a custom error handler does not need to be specified. + +- Redis: The fanout exchange can now use AMQP patterns to route and filter + messages. + + This change is backwards incompatible and must be enabled with + the ``fanout_patterns`` transport option:: + + >>> conn = kombu.Connection('redis://', transport_options={ + ... 'fanout_patterns': True, + ... }) + + When enabled, the exchange will work like an AMQP topic exchange + if the binding key is a pattern. + + This is planned to be the default behavior in the future. + +- Redis: Fixed ``cycle`` "no such attribute" error. + +..
_version-3.0.12: + +3.0.12 +====== +:release-date: 2014-02-09 03:50 P.M UTC +:release-by: Ask Solem + +- Now depends on :mod:`amqp` 1.4.3. + +- Fixes Python 3.4 logging incompatibility (Issue #311). + +- Redis: Now properly handles unknown pub/sub messages. + + Fix contributed by Sam Stavinoha. + +- amqplib: Fixed bug where more bytes were requested from the socket + than necessary. + + Fix contributed by Ionel Cristian Mărieș. + +.. _version-3.0.11: + +3.0.11 +====== +:release-date: 2014-02-03 05:00 P.M UTC +:release-by: Ask Solem + +- Now depends on :mod:`amqp` 1.4.2. + +- Now always trusts messages of type `application/data` and `application/text`, + or messages with an unspecified content type (Issue #306). + +- Compression errors are now handled as decode errors and will trigger + the ``Consumer.on_decode_error`` callback if specified. + +- New ``kombu.Connection.get_heartbeat_interval()`` method that can be + used to access the negotiated heartbeat value. + +- `kombu.common.oid_for` no longer uses the MAC address of the host, but + instead uses a process-wide UUID4 as a node id. + + This avoids a call to `uuid.getnode()` at module scope. + +- Hub.add: Now normalizes registered fileno. + + Contributed by Ionel Cristian Mărieș. + +- SQS: Fixed bug where the prefetch count limit was not respected. + +.. _version-3.0.10: + +3.0.10 +====== +:release-date: 2014-01-17 05:40 P.M UTC +:release-by: Ask Solem + +- Now depends on :mod:`amqp` 1.4.1. + +- ``maybe_declare`` now raises a "recoverable connection error" if + the channel is disconnected, instead of a :exc:`ChannelError`, so that + the operation can be retried. + +- Redis: ``Consumer.cancel()`` is now thread safe. + + This fixes an issue when using gevent/eventlet and a + message is handled after the consumer is cancelled, resulting + in a "message for queue without consumers" error. + +- Retry operations would not always respect the ``interval_start`` + value when calculating the time to sleep for (Issue #303). + + Fix contributed by Antoine Legrand. + +- Timer: Fixed "unhashable type" error on Python 3. + +- Hub: Do not attempt to unregister operations on an already closed + poller instance. + +.. _version-3.0.9: + +3.0.9 +===== +:release-date: 2014-01-13 05:30 P.M UTC +:release-by: Ask Solem + +- Now depends on :mod:`amqp` 1.4.0. + +- Redis: Basic cancel for fanout-based queues now sends a corresponding + ``UNSUBSCRIBE`` command to the server. + + This fixes an issue with pidbox where reply messages could be received + after the consumer was cancelled, giving the ``"message to queue without + consumers"`` error. + +- MongoDB: Improved connection string and options handling + (Issue #266 + Issue #120). + + Contributed by Alex Koshelev. + +- SQS: Limit the number of messages when receiving in batch to 10. + + This is a hard limit enforced by Amazon, so the SQS transport + must not exceed this value. + + Fix contributed by Eric Reynolds. + +- ConsumerMixin: ``consume`` now checks the heartbeat every time the + socket times out. + + Contributed by Dustin J. Mitchell. + +- Retry Policy: A ``max_retries`` value of 0 did not retry forever. + + Fix contributed by Antoine Legrand. + +- Simple: If a Queue object is passed, the simple utils will now take + the default routing key from that queue. + + Contributed by Fernando Jorge Mota. + +- ``repr(producer)`` no longer evaluates the underlying channel. + +- Redis: The map of Redis error classes is now exposed at the module level + using the :func:`kombu.transport.redis.get_redis_error_classes` function.
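As a hedged illustration of the entry above (not from the original changelog): the exposed error-class map might be consumed as follows. The tuple unpacking assumes ``get_redis_error_classes`` returns ``(connection_errors, channel_errors)`` as named-tuple fields, and the broker URL is a placeholder.

.. code-block:: python

    from kombu import Connection
    from kombu.transport.redis import get_redis_error_classes

    # Assumed to return a pair of exception-class tuples for the
    # installed redis-py version: (connection_errors, channel_errors).
    connection_errors, channel_errors = get_redis_error_classes()

    conn = Connection('redis://localhost:6379//')
    try:
        conn.connect()
    except connection_errors as exc:
        # Recoverable errors: safe to retry with a fresh connection.
        print('connection error, will retry: %r' % (exc,))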
+ +- Async: ``Hub.close`` now sets ``.poller`` to None. + +.. _version-3.0.8: + +3.0.8 +===== +:release-date: 2013-12-16 05:00 P.M UTC +:release-by: Ask Solem + +- Serializer: ``loads`` and ``dumps`` now wrap raised exceptions in + :exc:`~kombu.exceptions.DecodeError` and + :exc:`~kombu.exceptions.EncodeError` respectively. + + Contributed by Ionel Cristian Mărieș. + +- Redis: Would attempt to read from the wrong connection if a select/epoll/kqueue + exception event happened. + + Fix contributed by Michael Nelson. + +- Redis: Disabling ack emulation now works properly. + + Fix contributed by Michael Nelson. + +- Redis: :exc:`IOError` and :exc:`OSError` are now treated as recoverable + connection errors. + +- SQS: Improved performance by reading messages in bulk. + + Contributed by Matt Wise. + +- Connection Pool: Attempting to acquire from a closed pool will now + raise :class:`RuntimeError`. + +.. _version-3.0.7: + +3.0.7 +===== +:release-date: 2013-12-02 04:00 P.M UTC +:release-by: Ask Solem + +- Fixes Python 2.6 compatibility. + +- Redis: Fixes 'bad file descriptor' issue. + +.. _version-3.0.6: + +3.0.6 +===== +:release-date: 2013-11-21 04:50 P.M UTC +:release-by: Ask Solem + +- Timer: No longer attempts to hash keyword arguments (Issue #275). + +- Async: Did not account for the long type for file descriptors. + + Fix contributed by Fabrice Rabaute. + +- PyPy: kqueue support was broken. + +- Redis: Bad pub/sub payloads no longer crash the consumer. + +- Redis: Unix socket URLs can now specify a virtual host by including + it as a query parameter. + + Example URL specifying a virtual host using database number 3:: + + redis+socket:///tmp/redis.sock?virtual_host=3 + +- ``kombu.VERSION`` is now a named tuple. + +.. _version-3.0.5: + +3.0.5 +===== +:release-date: 2013-11-15 11:00 P.M UTC +:release-by: Ask Solem + +- Now depends on :mod:`amqp` 1.3.3. + +- Redis: Fixed Python 3 compatibility problem (Issue #270). + +- MongoDB: Fixed problem with URL parsing when authentication was used. + + Fix contributed by dongweiming. + +- pyamqp: Fixed a small issue when publishing a message with the property + dictionary set to None. + + Fix contributed by Victor Garcia. + +- Fixed problem in ``repr(LaxBoundedSemaphore)``. + + Fix contributed by Antoine Legrand. + +- Tests now passing on Python 3.3. + +.. _version-3.0.4: + +3.0.4 +===== +:release-date: 2013-11-08 01:00 P.M UTC +:release-by: Ask Solem + +- common.QoS: ``decrement_eventually`` now makes sure the value + does not go below 1 if a prefetch count is enabled. + +.. _version-3.0.3: + +3.0.3 +===== +:release-date: 2013-11-04 03:00 P.M UTC +:release-by: Ask Solem + +- SQS: Properly reverted patch that caused delays between messages. + + Contributed by James Saryerwinnie. + +- select: Clear all registered fds on ``poller.close``. + +- Eventloop: unregister if ``EBADF`` is raised. + +.. _version-3.0.2: + +3.0.2 +===== +:release-date: 2013-10-29 02:00 P.M UTC +:release-by: Ask Solem + +- Now depends on :mod:`amqp` version 1.3.2. + +- select: Fixed problem where unregister did not properly remove + the fd. + +.. _version-3.0.1: + +3.0.1 +===== +:release-date: 2013-10-24 04:00 P.M UTC +:release-by: Ask Solem + +- Now depends on :mod:`amqp` version 1.3.1.
+ +- Redis: New option ``fanout_keyprefix``. + + This transport option is recommended for all users as it ensures + that broadcast (fanout) messages sent are only seen by the current + virtual host:: + + Connection('redis://', transport_options={'fanout_keyprefix': True}) + + However, enabling this means that you cannot send or receive messages + from older Kombu versions, so make sure all of your participants + are upgraded and have the transport option enabled. + + This will be the default behavior in Kombu 4.0. + +- Distribution: Removed file ``requirements/py25.txt``. + +- MongoDB: Now disables ``auto_start_request``. + +- MongoDB: Enables ``use_greenlets`` if eventlet/gevent is used. + +- Pidbox: Fixes a problem where the ``expires`` header was None, + a value not supported by the AMQP protocol. + +- ConsumerMixin: New ``consumer_context`` method for starting + the consumer without draining events. + +.. _version-3.0.0: + +3.0.0 +===== +:release-date: 2013-10-14 04:00 P.M BST +:release-by: Ask Solem + +- Now depends on :mod:`amqp` version 1.3. + +- No longer supports Python 2.5. + + The minimum Python version supported is now Python 2.6.0 for Python 2, + and Python 3.3 for Python 3. + +- Dual codebase supporting both Python 2 and 3. + + No longer using ``2to3``, making it easier to maintain support for + both versions. + +- pickle, yaml and msgpack deserialization is now disabled by default. + + This means that Kombu will by default refuse to handle any content type other + than json. + + Pickle is known to be a security concern as it will happily + load any object that is embedded in a pickle payload, and payloads + can be crafted to do almost anything you want. The default + serializer in Kombu is json but it also supports a number + of other serialization formats that it will evaluate if received, + including pickle. + + It was always assumed that users were educated about the security + implications of pickle, but in hindsight we don't think users + should be expected to secure their services if we have the ability to + be secure by default. + + By disabling any content type that the user did not explicitly + want enabled, we ensure that the user must be conscious when they + add pickle as a serialization format to support. + + The other built-in serializers (yaml and msgpack) are also disabled + even though they aren't considered insecure [#f1]_ at this point. + Instead they're disabled so that if a security flaw is found in one of these + libraries in the future, you will only be affected if you have + explicitly enabled them. + + To have your consumer accept formats other than json you have to + explicitly add the wanted formats to a whitelist of accepted + content types:: + + >>> c = Consumer(conn, accept=['json', 'pickle', 'msgpack']) + + or when using synchronous access:: + + >>> msg = queue.get(accept=['json', 'pickle', 'msgpack']) + + The ``accept`` argument was first supported for consumers in version + 2.5.10, and first supported by ``Queue.get`` in version 2.5.15, + so to stay compatible with previous versions you can enable + the previous behavior: + + >>> from kombu import enable_insecure_serializers + >>> enable_insecure_serializers() + + But note that this has a global effect, so be very careful should you use it. + + .. rubric:: Footnotes + + .. [#f1] The PyYAML library has a :func:`yaml.load` function with some of the + same security implications as pickle, but Kombu uses the + :func:`yaml.safe_load` function which is not known to be affected.
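To make the new default concrete, here is a hedged sketch (queue name, callbacks and broker URL are illustrative, not from the changelog) of a consumer that only trusts json; a pickled payload is routed to the ``on_decode_error`` callback as a :exc:`~kombu.exceptions.ContentDisallowed` error instead of being deserialized:

.. code-block:: python

    import socket

    from kombu import Connection, Consumer, Exchange, Queue

    task_queue = Queue('tasks', Exchange('tasks'), routing_key='tasks')

    def handle_message(body, message):
        message.ack()

    def handle_decode_error(message, exc):
        # Called both for undecodable payloads and for disallowed
        # content types such as pickle (ContentDisallowed).
        print('discarding message: %r' % (exc,))
        message.reject()

    with Connection('amqp://guest:guest@localhost//') as conn:
        consumer = Consumer(conn, [task_queue],
                            callbacks=[handle_message],
                            accept=['json'],
                            on_decode_error=handle_decode_error)
        with consumer:
            try:
                conn.drain_events(timeout=5)
            except socket.timeout:
                pass  # no messages arrived within the timeout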
+ +- kombu.async: Experimental event loop implementation. + + This code was previously in Celery but was moved here + to make it easier for async transport implementations. + + The API is meant to match the Tulip API which will be included + in Python 3.4 as the ``asyncio`` module. It's obviously not a complete + implementation, but the goal is that it will be easy + to switch to it once that is possible. + +- Utility function ``kombu.common.ipublish`` has been removed. + + Use ``Producer(..., retry=True)`` instead. + +- Utility function ``kombu.common.isend_reply`` has been removed. + + Use ``send_reply(..., retry=True)`` instead. + +- ``kombu.common.entry_to_queue`` and ``kombu.messaging.entry_to_queue`` + have been removed. + + Use ``Queue.from_dict(name, **options)`` instead. + +- Redis: Messages are now restored at the end of the list. + + Contributed by Mark Lavin. + +- ``StdConnectionError`` and ``StdChannelError`` are removed + and :exc:`amqp.ConnectionError` and :exc:`amqp.ChannelError` are used + instead. + +- Message object implementation has moved to :class:`kombu.message.Message`. + +- Serialization: Renamed functions encode/decode to + :func:`~kombu.serialization.dumps` and :func:`~kombu.serialization.loads`. + + For backward compatibility the old names are still available as aliases. + +- The ``kombu.log.anon_logger`` function has been removed. + + Use :func:`~kombu.log.get_logger` instead. + +- ``queue_declare`` now returns a namedtuple with ``queue``, ``message_count``, + and ``consumer_count`` fields. + +- LamportClock: Can now set the lock class. + +- :mod:`kombu.utils.clock`: Utilities for ordering events added. + +- :class:`~kombu.simple.SimpleQueue` now allows you to override + the exchange type used. + + Contributed by Vince Gonzales. + +- Zookeeper transport updated to support new changes in the :mod:`kazoo` + library. + + Contributed by Mahendra M. + +- pyamqp/librabbitmq: Transport options are now forwarded as keyword arguments + to the underlying connection (Issue #214). + +- Transports may now distinguish between recoverable and irrecoverable + connection and channel errors. + +- ``kombu.utils.Finalize`` has been removed: Use + :mod:`multiprocessing.util.Finalize` instead. + +- Memory transport now supports the fanout exchange type. + + Contributed by Davanum Srinivas. + +- Experimental new `Pyro`_ transport (:mod:`kombu.transport.pyro`). + + Contributed by Tommie McAfee. + +.. _`Pyro`: http://pythonhosted.org/Pyro + +- Experimental new `SoftLayer MQ`_ transport (:mod:`kombu.transport.SLMQ`). + + Contributed by Kevin McDonald. + +.. _`SoftLayer MQ`: http://www.softlayer.com/services/additional/message-queue + +- Eventio: Kqueue breaks in subtle ways so select is now used instead. + +- SQLAlchemy transport: Can now specify table names using the + ``queue_tablename`` and ``message_tablename`` transport options. + + Contributed by Ryan Petrello. + +- Redis transport: Now supports using local UNIX sockets to communicate with the + Redis server (Issue #1283). + + To connect using a UNIX socket you have to use the ``redis+socket`` + URL-prefix: ``redis+socket:///tmp/redis.sock``. + + This functionality was merged from the `celery-redis-unixsocket`_ project. + Contributed by Maxime Rouyrre. + +- ZeroMQ transport: ``drain_events`` now supports a timeout. + + Contributed by Jesper Thomschütz. + +.. _`celery-redis-unixsocket`: + https://github.com/piquadrat/celery-redis-unixsocket + +..
_version-2.5.16: + +2.5.16 +====== +:release-date: 2013-10-04 03:30 P.M BST +:release-by: Ask Solem + +- Python3: Fixed problem with dependencies not being installed. + +.. _version-2.5.15: + +2.5.15 +====== +:release-date: 2013-10-04 03:30 P.M BST +:release-by: Ask Solem + +- Declaration cache: Now only keeps a hash of the declaration + so that it does not keep a reference to the channel. + +- Declaration cache: Now respects the ``entity.can_cache_declaration`` + attribute. + +- Fixes Python 2.5 compatibility. + +- Fixes tests after python-msgpack changes. + +- ``Queue.get``: Now supports the ``accept`` argument. + +.. _version-2.5.14: + +2.5.14 +====== +:release-date: 2013-08-23 05:00 P.M BST +:release-by: Ask Solem + +- safe_str did not work properly, resulting in + :exc:`UnicodeDecodeError` (Issue #248). + +.. _version-2.5.13: + +2.5.13 +====== +:release-date: 2013-08-16 04:00 P.M BST +:release-by: Ask Solem + +- Now depends on :mod:`amqp` 1.0.13. + +- Fixed typo in Django functional tests. + +- ``safe_str`` now returns Unicode in Python 2.x. + + Fix contributed by Germán M. Bravo. + +- amqp: Transport options are now merged with arguments + supplied to the connection. + +- Tests no longer depend on distribute, which was deprecated + and merged back into setuptools. + + Fix contributed by Sascha Peilicke. + +- ConsumerMixin now also restarts on channel related errors. + + Fix contributed by Corentin Ardeois. + +.. _version-2.5.12: + +2.5.12 +====== +:release-date: 2013-06-28 03:30 P.M BST +:release-by: Ask Solem + +- Redis: Ignore errors about keys missing in the round-robin cycle. + +- Fixed test suite errors on Python 3. + +- Fixed msgpack test failures. + +.. _version-2.5.11: + +2.5.11 +====== +:release-date: 2013-06-25 02:30 P.M BST +:release-by: Ask Solem + +- Now depends on amqp 1.0.12 (Py3 compatibility issues). + +- MongoDB: Removed cause of a "database name in URI is being ignored" + warning. + + Fix by Flavio Percoco Premoli. + +- Adds a ``passive`` option to :class:`~kombu.Exchange`. + + Setting this flag means that the exchange will not be declared by kombu, + but that it must exist already (or an exception will be raised). + + Contributed by Rafal Malinowski. + +- Connection.info() now gives the current hostname and not the list of + available hostnames. + + Fix contributed by John Shuping. + +- pyamqp: Transport options are now forwarded as kwargs to ``amqp.Connection``. + +- librabbitmq: Transport options are now forwarded as kwargs to + ``librabbitmq.Connection``. + +- librabbitmq: Now raises :exc:`NotImplementedError` if SSL is enabled. + + The librabbitmq library does not support SSL, + but you can use stunnel or change to the ``pyamqp://`` transport + instead. + + Fix contributed by Dan LaMotte. + +- librabbitmq: Fixed a cyclic reference at connection close. + +- eventio: select implementation now removes bad file descriptors. + +- eventio: Fixed Py3 compatibility problems. + +- Functional tests added for py-amqp and librabbitmq transports. + +- Resource.force_close_all no longer uses a mutex. + +- Pidbox: Now ignores ``InconsistencyError`` when sending replies, + as this error simply means that the client may no longer be alive. + +- Adds new :meth:`Connection.collect <kombu.Connection.collect>` method, + that can be used to clean up after connections without I/O. + +- ``queue_bind`` is no longer called for queues bound to + the "default exchange" (Issue #209). + + Contributed by Jonathan Halcrow. + +- The ``max_retries`` setting for retries was not respected correctly (off by one). + +..
_version-2.5.10: + +2.5.10 +====== +:release-date: 2013-04-11 06:10 P.M BST +:release-by: Ask Solem + +Note about upcoming changes for Kombu 3.0 +----------------------------------------- + +Kombu 3 consumers will no longer accept pickle/yaml or msgpack +by default, and you will have to explicitly enable untrusted deserializers +either globally using :func:`kombu.enable_insecure_serializers`, or +using the ``accept`` argument to :class:`~kombu.Consumer`. + +Changes +------- + +- New utility function to disable/enable untrusted serializers. + + - :func:`kombu.disable_insecure_serializers` + - :func:`kombu.enable_insecure_serializers`. + +- Consumer: ``accept`` can now be used to specify a whitelist + of content types to accept. + + If the accept whitelist is set and a message is received + with a content type that is not in the whitelist then a + :exc:`~kombu.exceptions.ContentDisallowed` exception + is raised. Note that this error can be handled by the already + existing `on_decode_error` callback. + + Examples:: + + Consumer(accept=['application/json']) + Consumer(accept=['pickle', 'json']) + +- Now depends on amqp 1.0.11. + +- pidbox: Mailbox now supports the ``accept`` argument. + +- Redis: More friendly error for when keys are missing. + +- Connection URLs: The parser did not work well when there were + multiple '+' tokens. + +.. _version-2.5.9: + +2.5.9 +===== +:release-date: 2013-04-08 05:07 P.M BST +:release-by: Ask Solem + +- Pidbox: Now warns if there are multiple nodes consuming from + the same pidbox. + +- Adds :attr:`Queue.on_declared <kombu.Queue.on_declared>` + + A callback to be called when the queue is declared, + with signature ``(name, messages, consumers)``. + +- Now uses fuzzy matching to suggest alternatives to typos in transport + names. + +- SQS: Adds new transport option ``queue_prefix``. + + Contributed by j0hnsmith. + +- pyamqp: No longer overrides ``verify_connection``. + +- SQS: Now specifies the ``driver_type`` and ``driver_name`` + attributes. + + Fix contributed by Mher Movsisyan. + +- Fixed bug with ``kombu.utils.retry_over_time`` when no errback + is specified. + + +.. _version-2.5.8: + +2.5.8 +===== +:release-date: 2013-03-21 04:00 P.M UTC +:release-by: Ask Solem + +- Now depends on :mod:`amqp` 1.0.10 which fixes a Python 3 compatibility error. + +- Redis: Fixed a possible race condition (Issue #171). + +- Redis: Ack emulation/visibility_timeout can now be disabled + using a transport option. + + Ack emulation adds quite a lot of overhead to ensure data is safe + even in the event of an unclean shutdown. If data loss does not worry + you, there is now an `ack_emulation` transport option you can use + to disable it:: + + Connection('redis://', transport_options={'ack_emulation': False}) + +- SQS: Fixed :mod:`boto` v2.7 compatibility (Issue #207). + +- Exchange: Should not try to re-declare the default exchange (``""``) + (Issue #209). + +- SQS: Long polling is now disabled by default as it was not + implemented correctly, resulting in long delays between receiving + messages (Issue #202). + +- Fixed Python 2.6 incompatibility depending on ``exc.errno`` + being available. + + Fix contributed by Ephemera. + +.. _version-2.5.7: + +2.5.7 +===== +:release-date: 2013-03-08 01:00 P.M UTC +:release-by: Ask Solem + +- Now depends on amqp 1.0.9. + +- Redis: A regression in 2.5.6 caused the redis transport to + ignore options set in ``transport_options``. + +- Redis: New ``socket_timeout`` transport option. + +- Redis: ``InconsistencyError`` is now regarded as a recoverable error.
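For context, recoverable errors are the ones kombu's retry helpers will absorb. Below is a hedged sketch (exchange name, payload and broker URL are placeholders) of wrapping a publish in ``Connection.ensure`` so that such errors trigger a retry instead of propagating:

.. code-block:: python

    from kombu import Connection, Exchange, Producer

    task_exchange = Exchange('tasks', type='direct')

    def errback(exc, interval):
        # Called between retries with the recoverable error and the
        # number of seconds until the next attempt.
        print('publish failed: %r, retrying in %ss' % (exc, interval))

    with Connection('redis://localhost:6379//') as conn:
        producer = Producer(conn)
        safe_publish = conn.ensure(producer, producer.publish,
                                   errback=errback, max_retries=3)
        safe_publish({'hello': 'world'},
                     exchange=task_exchange,
                     routing_key='tasks',
                     declare=[task_exchange],
                     serializer='json')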
+ +- Resource pools: Will no longer attempt to release a resource + that was never acquired. + +- MongoDB: Now supports the ``ssl`` option. + + Contributed by Sebastian Pawlus. + +.. _version-2.5.6: + +2.5.6 +===== +:release-date: 2013-02-08 01:00 P.M UTC +:release-by: Ask Solem + +- Now depends on amqp 1.0.8, which works around a bug found on some + Python 2.5 installations where 2**32 overflows to 0. + +.. _version-2.5.5: + +2.5.5 +===== +:release-date: 2013-02-07 05:00 P.M UTC +:release-by: Ask Solem + +- SQS: Now supports long polling (Issue #176). + + The polling interval default has been changed to 0 and a new + transport option (``wait_time_seconds``) has been added. + This parameter specifies how long to wait for a message from + SQS, and defaults to 20 seconds, which is the maximum + value currently allowed by Amazon SQS. + + Contributed by James Saryerwinnie. + +- SQS: Now removes unpickleable fields before restoring messages. + +- ``Consumer.__exit__`` now ignores exceptions occurring while + cancelling the consumer. + +- Virtual: Routing keys can now consist of characters also used + in regular expressions (e.g. parens) (Issue #194). + +- Virtual: Fixed compression header when restoring messages. + + Fix contributed by Alex Koshelev. + +- Virtual: ack/reject/requeue now works while using ``basic_get``. + +- Virtual: ``Message.reject`` is now supported by virtual transports + (requeue depends on individual transport support). + +- Fixed typo in hack used for static analyzers. + + Fix contributed by Basil Mironenko. + +.. _version-2.5.4: + +2.5.4 +===== +:release-date: 2012-12-10 12:35 P.M UTC +:release-by: Ask Solem + +- Fixed problem with connection clone and multiple URLs (Issue #182). + + Fix contributed by Dane Guempel. + +- zeromq: Now compatible with libzmq 3.2.x. + + Fix contributed by Andrey Antukh. + +- Fixed Python 3 installation problem (Issue #187). + +.. _version-2.5.3: + +2.5.3 +===== +:release-date: 2012-11-29 12:35 P.M UTC +:release-by: Ask Solem + +- Pidbox: Fixed compatibility with Python 2.6. + +.. _version-2.5.2: + +2.5.2 +===== +:release-date: 2012-11-29 12:35 P.M UTC +:release-by: Ask Solem + +- Redis: Fixed a connection leak and added a new ``max_connections`` transport + option. + +.. _version-2.5.1: + +2.5.1 +===== +:release-date: 2012-11-28 12:45 P.M UTC +:release-by: Ask Solem + +- Fixed bug where the return value of ``Queue.as_dict`` could not be serialized with + JSON (Issue #177). + +.. _version-2.5.0: + +2.5.0 +===== +:release-date: 2012-11-27 04:00 P.M UTC +:release-by: Ask Solem + +- `py-amqp`_ is now the new default transport, replacing ``amqplib``. + + The new `py-amqp`_ library is a fork of amqplib started with the + following goals: + + - Uses AMQP 0.9.1 instead of 0.8 + - Support for heartbeats (Issue #79 + Issue #131) + - Automatically revives channels on channel errors. + - Support for all RabbitMQ extensions: + - Consumer Cancel Notifications (Issue #131) + - Publisher Confirms (Issue #131). + - Exchange-to-exchange bindings: ``exchange_bind`` / ``exchange_unbind``. + - API compatible with :mod:`librabbitmq` so that it can be used + as a pure-python replacement in environments where rabbitmq-c cannot + be compiled. librabbitmq will be updated to support all the same + features as py-amqp. + +- Support for using multiple connection URLs for failover. + + The first argument to :class:`~kombu.Connection` can now be a list of + connection URLs: + + ..
code-block:: python + + Connection(['amqp://foo', 'amqp://bar']) + + or it can be a single string argument with several URLs separated by + semicolons: + + .. code-block:: python + + Connection('amqp://foo;amqp://bar') + + There is also a new keyword argument ``failover_strategy`` that defines + how :meth:`~kombu.Connection.ensure_connection`/ + :meth:`~kombu.Connection.ensure`/:meth:`~kombu.Connection.autoretry` will + reconnect in the event of connection failures. + + The default reconnection strategy is ``round-robin``, which will simply + cycle through the list forever, and there's also a ``shuffle`` strategy + that will select random hosts from the list. Custom strategies can also + be used; in that case the argument must be a generator yielding the URL + to connect to. + +- Now supports PyDev, PyCharm, pylint and other static code analysis tools. + +- :class:`~kombu.Queue` now supports multiple bindings. + + You can now have multiple bindings in the same queue by having + the second argument be a list: + + .. code-block:: python + + from kombu import binding, Exchange, Queue + + Queue('name', [ + binding(Exchange('E1'), routing_key='foo'), + binding(Exchange('E1'), routing_key='bar'), + binding(Exchange('E2'), routing_key='baz'), + ]) + + To enable this, helper methods have been added: + + - :meth:`~kombu.Queue.bind_to` + - :meth:`~kombu.Queue.unbind_from` + + Contributed by Rumyana Neykova. + +- Custom serializers can now be registered using Setuptools entry-points. + + See :ref:`serialization-entrypoints`. + +- New :class:`kombu.common.QoS` class used as a thread-safe way to manage + changes to a consumer's or channel's ``prefetch_count``. + + This was previously an internal class used in Celery, now moved to + the :mod:`kombu.common` module. + +- Consumer now supports an ``on_message`` callback that can be used to process + raw messages (not decoded). + + Other callbacks specified using the ``callbacks`` argument, and + the ``receive`` method, will not be called when an ``on_message`` callback + is present. + +- New utility :func:`kombu.common.ignore_errors` ignores connection and + channel errors. + + Must only be used for cleanup actions at shutdown or on connection loss. + +- Support for exchange-to-exchange bindings. + + The :class:`~kombu.Exchange` entity gained ``bind_to`` + and ``unbind_from`` methods: + + .. code-block:: python + + e1 = Exchange('A')(connection) + e2 = Exchange('B')(connection) + + e2.bind_to(e1, routing_key='rkey', arguments=None) + e2.unbind_from(e1, routing_key='rkey', arguments=None) + + This is currently only supported by the ``pyamqp`` transport. + + Contributed by Rumyana Neykova. + +.. _version-2.4.10: + +2.4.10 +====== +:release-date: 2012-11-22 06:00 P.M UTC +:release-by: Ask Solem + +- The previous version's connection pool changes broke Redis support so that + it would always connect to localhost (default setting) no matter what + connection parameters were provided (Issue #176). + +.. _version-2.4.9: + +2.4.9 +===== +:release-date: 2012-11-21 03:00 P.M UTC +:release-by: Ask Solem + +- Redis: Fixed race condition that could occur while trying to restore + messages (Issue #171). + + Fix contributed by Ollie Walsh. + +- Redis: Each channel now uses a specific connection pool instance, + which is disconnected on connection failure. + +- ProducerPool: Fixed possible deadlock in the acquire method.
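For reference, the ``acquire`` path that the deadlock fix above concerns is typically exercised as in this hedged sketch (URL, exchange and payload are placeholders):

.. code-block:: python

    from kombu import Connection, Exchange
    from kombu.pools import producers

    conn = Connection('amqp://guest:guest@localhost//')
    task_exchange = Exchange('tasks', type='direct')

    # Acquire a pooled producer, publish, then release it back to
    # the pool when the block exits.
    with producers[conn].acquire(block=True) as producer:
        producer.publish({'hello': 'world'},
                         exchange=task_exchange,
                         routing_key='tasks',
                         declare=[task_exchange],
                         serializer='json')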
+
+- ProducerPool: ``force_close_all`` no longer tries to call the non-existent
+  ``Producer._close``.
+
+- librabbitmq: Now implements ``transport.verify_connection`` so that
+  connection pools will not give back connections that are no longer working.
+
+- New and better ``repr()`` for Queue and Exchange objects.
+
+- Python 3: Fixed problem with running the unit test suite.
+
+- Python 3: Fixed problem with JSON codec.
+
+.. _version-2.4.8:
+
+2.4.8
+=====
+:release-date: 2012-11-02 05:00 P.M UTC
+:release-by: Ask Solem
+
+- Redis: Improved fair queue cycle implementation (Issue #166).
+
+    Contributed by Kevin McCarthy.
+
+- Redis: Unacked message restore limit is now unlimited by default.
+
+    Also, the limit can now be configured using the ``unacked_restore_limit``
+    transport option:
+
+    .. code-block:: python
+
+        Connection('redis://', transport_options={
+            'unacked_restore_limit': 100,
+        })
+
+    A limit of 100 means that the consumer will restore at most 100
+    messages at each pass.
+
+- Redis: Now uses a mutex to ensure only one consumer restores messages at a
+  time.
+
+    The mutex expires after 5 minutes by default, but can be configured
+    using the ``unacked_mutex_expire`` transport option.
+
+- LamportClock.adjust now returns the new clock value.
+
+- Heartbeats can now be specified in URLs.
+
+    Fix contributed by Mher Movsisyan.
+
+- Kombu can now be used with PyDev, PyCharm and other static analysis tools.
+
+- Fixed problem with msgpack on Python 3 (Issue #162).
+
+    Fix contributed by Jasper Bryant-Greene.
+
+- amqplib: Fixed bug with timeouts when SSL is used in non-blocking mode.
+
+    Fix contributed by Mher Movsisyan.
+
+.. _version-2.4.7:
+
+2.4.7
+=====
+:release-date: 2012-09-18 03:00 P.M BST
+:release-by: Ask Solem
+
+- Virtual: Unknown exchanges now default to 'direct' when sending a message.
+
+- MongoDB: Fixed memory leak when merging keys stored in the db (Issue #159).
+
+    Fix contributed by Michael Korbakov.
+
+- MongoDB: Better index for MongoDB transport (Issue #158).
+
+    This improvement creates a new compound index on ``queue`` and ``_id``,
+    so that both indexed fields can be used: ``queue`` for fetching a new
+    message and ``_id`` for sorting. The old index must be deleted from the
+    collection manually.
+
+    Improvement contributed by rmihael.
+
+.. _version-2.4.6:
+
+2.4.6
+=====
+:release-date: 2012-09-12 03:00 P.M BST
+:release-by: Ask Solem
+
+- Adds additional compatibility dependencies:
+
+    - Python <= 2.6:
+
+        - importlib
+        - ordereddict
+
+    - Python <= 2.5:
+
+        - simplejson
+
+.. _version-2.4.5:
+
+2.4.5
+=====
+:release-date: 2012-08-30 03:36 P.M BST
+:release-by: Ask Solem
+
+- Last version broke installation on PyPy and Jython due
+  to test requirements clean-up.
+
+.. _version-2.4.4:
+
+2.4.4
+=====
+:release-date: 2012-08-29 04:00 P.M BST
+:release-by: Ask Solem
+
+- amqplib: Fixed a bug with asynchronously reading large messages.
+
+- pyamqp: Now requires amqp 0.9.3.
+
+- Cleaned up test requirements.
+
+.. _version-2.4.3:
+
+2.4.3
+=====
+:release-date: 2012-08-25 10:30 P.M BST
+:release-by: Ask Solem
+
+- Fixed problem with amqp transport alias (Issue #154).
+
+.. _version-2.4.2:
+
+2.4.2
+=====
+:release-date: 2012-08-24 05:00 P.M BST
+:release-by: Ask Solem
+
+- Having an empty transport name broke in 2.4.1.
+
+.. _version-2.4.1:
+
+2.4.1
+=====
+:release-date: 2012-08-24 04:00 P.M BST
+:release-by: Ask Solem
+
+- Redis: Fixed race condition that could cause the consumer to crash
+  (Issue #151), often leading to the error message
+  ``"could not convert string to float"``.
+
+- Connection retry could cause an infinite loop (Issue #145).
+
+- The ``amqp`` alias is now resolved at runtime, so that eventlet detection
+  works even if patching was done later.
+
+.. _version-2.4.0:
+
+2.4.0
+=====
+:release-date: 2012-08-17 08:00 P.M BST
+:release-by: Ask Solem
+
+- New experimental :mod:`ZeroMQ <kombu.transport.zmq>` transport.
+
+- New ``pyamqp://`` transport:
+
+    .. code-block:: python
+
+        >>> conn = Connection('pyamqp://guest:guest@localhost//')
+
+    The ``pyamqp://`` transport will be the default fallback transport
+    in Kombu version 3.0, when :mod:`librabbitmq` is not installed,
+    and librabbitmq will also be updated to support the same features.
+
+- Connection now supports a heartbeat argument.
+
+    If enabled you must make sure to manually maintain heartbeats
+    by calling ``Connection.heartbeat_check`` at twice the rate
+    of the specified heartbeat interval.
+
+    E.g. if you have ``Connection(heartbeat=10)``,
+    then you must call ``Connection.heartbeat_check()`` every 5 seconds.
+
+    If the server has not sent heartbeats at a suitable rate then
+    the heartbeat check method must raise an error that is listed
+    in ``Connection.connection_errors``.
+
+    The attribute ``Connection.supports_heartbeats`` has been added
+    so that you can inspect whether a transport supports heartbeats.
+
+    Calling ``heartbeat_check`` on a transport that does
+    not support heartbeats is a no-op.
+
+- SQS: Fixed bug with invalid characters in queue names.
+
+    Fix contributed by Zach Smith.
+
+- utils.reprcall: Fixed typo where the kwargs argument was an empty tuple by
+  default instead of an empty dict.
+
+.. _version-2.2.6:
+
+2.2.6
+=====
+:release-date: 2012-07-10 05:00 P.M BST
+:release-by: Ask Solem
+
+- Adds ``kombu.messaging.entry_to_queue`` for compat with previous versions.
+
+.. _version-2.2.5:
+
+2.2.5
+=====
+:release-date: 2012-07-10 05:00 P.M BST
+:release-by: Ask Solem
+
+- Pidbox: Now sets the queue expiry to 10 seconds for reply queues.
+
+- EventIO: Now ignores ``ValueError`` raised by epoll unregister.
+
+- MongoDB: Fixes Issue #142.
+
+    Fix by Flavio Percoco Premoli.
+
+.. _version-2.2.4:
+
+2.2.4
+=====
+:release-date: 2012-07-05 04:00 P.M BST
+:release-by: Ask Solem
+
+- Support for msgpack-python 0.2.0 (Issue #143).
+
+    The latest msgpack version no longer supports Python 2.5, so if you're
+    still using that you need to depend on an earlier msgpack-python version.
+
+    Fix contributed by Sebastian Insua.
+
+- :func:`~kombu.common.maybe_declare` no longer caches entities with the
+  ``auto_delete`` flag set.
+
+- New experimental filesystem transport.
+
+    Contributed by Bobby Beever.
+
+- Virtual Transports: Now support anonymous queues and exchanges.
+
+.. _version-2.2.3:
+
+2.2.3
+=====
+:release-date: 2012-06-24 05:00 P.M BST
+:release-by: Ask Solem
+
+- ``BrokerConnection`` has been renamed to ``Connection``.
+
+    The name ``Connection`` has been an alias for a very long time,
+    but now the rename is official in the documentation as well.
+
+    The Connection alias has been available since version 1.1.3,
+    and ``BrokerConnection`` will still work and is not deprecated.
+
+- ``Connection.clone()`` now works for the sqlalchemy transport.
+
+- :func:`kombu.common.eventloop`, :func:`kombu.utils.uuid`,
+  and :func:`kombu.utils.url.parse_url` can now be
+  imported from the :mod:`kombu` module directly.
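+
+    For example (a minimal sketch; the URL is illustrative only):
+
+    .. code-block:: python
+
+        from kombu import eventloop, parse_url, uuid
+
+        print(uuid())  # a new unique id, e.g. usable as a tag
+        print(parse_url('amqp://guest:guest@localhost:5672//'))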
+ +- Pidbox transport callback ``after_reply_message_received`` now happens + in a finally block. + +- Trying to use the ``librabbitmq://`` transport will now show the right + name in the :exc:`ImportError` if :mod:`librabbitmq` is not installed. + + The librabbitmq falls back to the older ``pylibrabbitmq`` name for + compatibility reasons and would therefore show ``No module named + pylibrabbitmq`` instead of librabbitmq. + + +.. _version-2.2.2: + +2.2.2 +===== +:release-date: 2012-06-22 02:30 P.M BST +:release-by: Ask Solem + +- Now depends on :mod:`anyjson` 0.3.3 + +- Json serializer: Now passes :class:`buffer` objects directly, + since this is supported in the latest :mod:`anyjson` version. + +- Fixes blocking epoll call if timeout was set to 0. + + Fix contributed by John Watson. + +- setup.py now takes requirements from the :file:`requirements/` directory. + +- The distribution directory :file:`contrib/` is now renamed to :file:`extra/` + +.. _version-2.2.1: + +2.2.1 +===== +:release-date: 2012-06-21 01:00 P.M BST +:release-by: Ask Solem + +- SQS: Default visibility timeout is now 30 minutes. + + Since we have ack emulation the visibility timeout is + only in effect if the consumer is abrubtly terminated. + +- retry argument to ``Producer.publish`` now works properly, + when the declare argument is specified. + +- Json serializer: didn't handle buffer objects (Issue #135). + + Fix contributed by Jens Hoffrichter. + +- Virtual: Now supports passive argument to ``exchange_declare``. + +- Exchange & Queue can now be bound to connections (which will use the default + channel): + + >>> exchange = Exchange('name') + >>> bound_exchange = exchange(connection) + >>> bound_exchange.declare() + +- ``SimpleQueue`` & ``SimpleBuffer`` can now be bound to connections (which + will use the default channel). + +- ``Connection.manager.get_bindings`` now works for librabbitmq and pika. + +- Adds new transport info attributes:: + + - ``Transport.driver_type`` + + Type of underlying driver, e.g. "amqp", "redis", "sql". + + - ``Transport.driver_name`` + + Name of library used e.g. "amqplib", "redis", "pymongo". + + - ``Transport.driver_version()`` + + Version of underlying library. + +.. _version-2.2.0: + +2.2.0 +===== +:release-date: 2012-06-07 03:10 P.M BST +:release-by: Ask Solem + +.. _v220-important: + +Important Notes +--------------- + +- The canonical source code repository has been moved to + + http://github.com/celery/kombu + +- Pidbox: Exchanges used by pidbox are no longer auto_delete. + + Auto delete has been described as a misfeature, + and therefore we have disabled it. + + For RabbitMQ users old exchanges used by pidbox must be removed, + these are named ``mailbox_name.pidbox``, + and ``reply.mailbox_name.pidbox``. + + The following command can be used to clean up these exchanges:: + + VHOST=/ URL=amqp:// python -c'import sys,kombu;[kombu.Connection( + sys.argv[-1]).channel().exchange_delete(x) + for x in sys.argv[1:-1]]' \ + $(sudo rabbitmqctl -q list_exchanges -p "$VHOST" \ + | grep \.pidbox | awk '{print $1}') "$URL" + + The :envvar:`VHOST` variable must be set to the target RabbitMQ virtual host, + and the :envvar:`URL` must be the AMQP URL to the server. + +- The ``amqp`` transport alias will now use :mod:`librabbitmq` + if installed. + + `py-librabbitmq`_ is a fast AMQP client for Python + using the librabbitmq C library. + + It can be installed by:: + + $ pip install librabbitmq + + It will not be used if the process is monkey patched by eventlet/gevent. + +.. 
_`py-librabbitmq`: https://github.com/celery/librabbitmq
+
+.. _v220-news:
+
+News
+----
+
+- Redis: Ack emulation improvements, reducing the possibility of data loss.
+
+    Acks are now implemented by storing a copy of the message when the message
+    is consumed. The copy is not removed until the consumer acknowledges
+    or rejects it.
+
+    This means that unacknowledged messages will be redelivered either
+    when the connection is closed, or when the visibility timeout is exceeded.
+
+    - Visibility timeout
+
+        This is a timeout for acks, so that if the consumer
+        does not ack the message within this time limit, the message
+        is redelivered to another consumer.
+
+        The timeout is set to one hour by default, but
+        can be changed by configuring a transport option:
+
+            >>> Connection('redis://', transport_options={
+            ...     'visibility_timeout': 1800,  # 30 minutes
+            ... })
+
+    **NOTE**: Messages that have not been acked will be redelivered
+    if the visibility timeout is exceeded. For Celery users
+    this means that ETA/countdown tasks that are scheduled to execute
+    with a time that exceeds the visibility timeout will be executed
+    twice (or more). If you plan on using long ETA/countdowns you
+    should tweak the visibility timeout accordingly::
+
+        BROKER_TRANSPORT_OPTIONS = {'visibility_timeout': 18000}  # 5 hours
+
+    Setting a long timeout means that it will take a long time
+    for messages to be redelivered in the event of a power failure,
+    but if that happens you could temporarily set the visibility timeout
+    lower to flush out messages when you start up the systems again.
+
+- Experimental `Apache ZooKeeper`_ transport.
+
+    More information is in the module reference:
+    :mod:`kombu.transport.zookeeper`.
+
+    Contributed by Mahendra M.
+
+.. _`Apache ZooKeeper`: http://zookeeper.apache.org/
+
+- Redis: Priority support.
+
+    The message's ``priority`` field is now respected by the Redis
+    transport by having multiple lists for each named queue.
+    The queues are then consumed in order of priority.
+
+    The priority field is a number in the range of 0 - 9, where
+    0 is the default and highest priority.
+
+    The priority range is collapsed into four steps by default, since it is
+    unlikely that nine steps will yield more benefit than using four steps.
+    The number of steps can be configured by setting the ``priority_steps``
+    transport option, which must be a list of numbers in **sorted order**::
+
+        >>> x = Connection('redis://', transport_options={
+        ...     'priority_steps': [0, 2, 4, 6, 8, 9],
+        ... })
+
+    Priorities implemented in this way are not as reliable as
+    priorities on the server side, which is why we nickname the feature
+    "quasi-priorities".
+    **Using routing is still the suggested way of ensuring
+    quality of service**, as client-implemented priorities
+    fall short in a number of ways, e.g. if the worker
+    is busy with long running tasks, has prefetched many messages,
+    or the queues are congested.
+
+    Still, it is possible that using priorities in combination
+    with routing can be more beneficial than using routing
+    or priorities alone. Experimentation and monitoring
+    should be used to prove this.
+
+    Contributed by Germán M. Bravo.
+
+- Redis: Now cycles queues so that consuming is fair.
+
+    This ensures that a very busy queue won't block messages
+    from other queues, and ensures that all queues have
+    an equal chance of being consumed from.
+
+    This used to be the case before, but the behavior was
+    accidentally changed while switching to using blocking pop.
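+
+A short, hypothetical sketch of the quasi-priority support described above;
+the routing key, step values and priority are illustrative only:
+
+.. code-block:: python
+
+    from kombu import Connection
+
+    with Connection('redis://', transport_options={
+            'priority_steps': [0, 3, 6, 9]}) as connection:
+        producer = connection.Producer()
+        # 0 is the default and highest priority on this transport
+        producer.publish({'hello': 'world'},
+                         routing_key='hipri', priority=0)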
+
+- Redis: Auto-delete queues that are bound to fanout exchanges
+  are now deleted at ``channel.close``.
+
+- amqplib: Refactored the drain_events implementation.
+
+- Pidbox: Now uses ``connection.default_channel``.
+
+- Pickle serialization: Can now decode buffer objects.
+
+- Exchange/Queue declarations can now be cached even if
+  the entity is non-durable.
+
+    This is possible because the list of cached declarations
+    is now kept with the connection, so that the entities
+    will be redeclared if the connection is lost.
+
+- Kombu source code now only uses one level of explicit relative imports.
+
+.. _v220-fixes:
+
+Fixes
+-----
+
+- eventio: Now ignores ENOENT raised by ``epoll.register``, and
+  EEXIST from ``epoll.unregister``.
+
+- eventio: kqueue now ignores :exc:`KeyError` on unregister.
+
+- Redis: ``Message.reject`` now supports the ``requeue`` argument.
+
+- Redis: Removed superfluous pipeline call.
+
+    Fix contributed by Thomas Johansson.
+
+- Redis: Now sets the redelivered header for redelivered messages.
+
+- Now always makes sure references to :func:`sys.exc_info` are removed.
+
+- Virtual: The compression header is now removed before restoring messages.
+
+- More tests for the SQLAlchemy backend.
+
+    Contributed by Franck Cuny.
+
+- URL parsing did not handle MongoDB URLs properly.
+
+    Fix contributed by Flavio Percoco Premoli.
+
+- Beanstalk: Ignore default tube when reserving.
+
+    Fix contributed by Zhao Xiaohong.
+
+Nonblocking consume support
+---------------------------
+
+The librabbitmq, amqplib and redis transports can now be used
+in non-blocking mode.
+
+The interface is very manual, and only consuming messages
+is non-blocking so far.
+
+The API should not be regarded as stable or final
+in any way. It is used by Celery, which has very limited
+needs at this point. Hopefully we can introduce a proper
+callback-based API later.
+
+- ``Transport.eventmap``
+
+    A map of ``fd -> callback(fileno, event)``
+    to register in an eventloop.
+
+- ``Transport.on_poll_start()``
+
+    Called before every call to poll.
+    The poller must support ``register(fd, callback)``
+    and ``unregister(fd)`` methods.
+
+- ``Transport.on_poll_init(poller)``
+
+    Called when the hub is initialized.
+    The poller argument must support the same
+    interface as :class:`kombu.utils.eventio.poll`.
+
+- ``Connection.ensure_connection`` now takes a callback
+  argument which is called for every loop while
+  the connection is down.
+
+- Adds ``connection.drain_nowait``.
+
+    This is a non-blocking alternative to drain_events,
+    but only supported by amqplib/librabbitmq.
+
+- drain_events now sets ``connection.more_to_read`` if
+  there is more data to read.
+
+    This is to support eventloops where other things
+    must be handled between draining events.
+
+.. _version-2.1.8:
+
+2.1.8
+=====
+:release-date: 2012-05-06 03:06 P.M BST
+:release-by: Ask Solem
+
+* Bound Exchange/Queue instances are now pickleable.
+
+* Consumer/Producer can now be instantiated without a channel,
+  and only later bound using ``.revive(channel)``.
+
+* ProducerPool now takes a ``Producer`` argument.
+
+* :func:`~kombu.utils.fxrange` now counts forever if the
+  stop argument is set to None
+  (fxrange is like xrange, but for decimals).
+
+* Auto delete support for virtual transports was incomplete
+  and could lead to problems, so it was removed.
+
+* Cached declarations (:func:`~kombu.common.maybe_declare`)
+  are now bound to the underlying connection, so that
+  entities are redeclared if the connection is lost.
+ + This also means that previously uncacheable entities + (e.g. non-durable) can now be cached. + +* compat ConsumerSet: can now specify channel. + +.. _version-2.1.7: + +2.1.7 +===== +:release-date: 2012-04-27 06:00 P.M BST +:release-by: Ask Solem + +* compat consumerset now accepts optional channel argument. + +.. _version-2.1.6: + +2.1.6 +===== +:release-date: 2012-04-23 01:30 P.M BST +:release-by: Ask Solem + +* SQLAlchemy transport was not working correctly after URL parser change. + +* maybe_declare now stores cached declarations per underlying connection + instead of globally, in the rare case that data disappears from the + broker after connection loss. + +* Django: Added South migrations. + + Contributed by Joseph Crosland. + +.. _version-2.1.5: + +2.1.5 +===== +:release-date: 2012-04-13 03:30 P.M BST +:release-by: Ask Solem + +* The url parser removed more than the first leading slash (Issue #121). + +* SQLAlchemy: Can now specify url using + separator + + Example:: + + Connection('sqla+mysql://localhost/db') + +* Better support for anonymous queues (Issue #116). + + Contributed by Michael Barrett. + +* ``Connection.as_uri`` now quotes url parts (Issue #117). + +* Beanstalk: Can now set message TTR as a message property. + + Contributed by Andrii Kostenko + +.. _version-2.1.4: + +2.1.4 +===== +:release-date: 2012-04-03 04:00 P.M GMT +:release-by: Ask Solem + +* MongoDB: URL parsing are now delegated to the pymongo library + (Fixes Issue #103 and Issue #87). + + Fix contributed by Flavio Percoco Premoli and James Sullivan + +* SQS: A bug caused SimpleDB to be used even if sdb persistence + was not enabled (Issue #108). + + Fix contributed by Anand Kumria. + +* Django: Transaction was committed in the wrong place, causing + data cleanup to fail (Issue #115). + + Fix contributed by Daisuke Fujiwara. + +* MongoDB: Now supports replica set URLs. + + Contributed by Flavio Percoco Premoli. + +* Redis: Now raises a channel error if a queue key that is currently + being consumed from disappears. + + Fix contributed by Stephan Jaekel. + +* All transport 'channel_errors' lists now includes + ``kombu.exception.StdChannelError``. + +* All kombu exceptions now inherit from a common + :exc:`~kombu.exceptions.KombuError`. + +.. _version-2.1.3: + +2.1.3 +===== +:release-date: 2012-03-20 03:00 P.M GMT +:release-by: Ask Solem + +* Fixes Jython compatibility issues. + +* Fixes Python 2.5 compatibility issues. + +.. _version-2.1.2: + +2.1.2 +===== +:release-date: 2012-03-01 01:00 P.M GMT +:release-by: Ask Solem + +* amqplib: Last version broke SSL support. + +.. _version-2.1.1: + +2.1.1 +===== +:release-date: 2012-02-24 02:00 P.M GMT +:release-by: Ask Solem + +* Connection URLs now supports encoded characters. + +* Fixed a case where connection pool could not recover from connection loss. + + Fix contributed by Florian Munz. + +* We now patch amqplib's ``__del__`` method to skip trying to close the socket + if it is not connected, as this resulted in an annoying warning. + +* Compression can now be used with binary message payloads. + + Fix contributed by Steeve Morin. + +.. _version-2.1.0: + +2.1.0 +===== +:release-date: 2012-02-04 10:38 P.M GMT +:release-by: Ask Solem + +* MongoDB: Now supports fanout (broadcast) (Issue #98). + + Contributed by Scott Lyons. + +* amqplib: Now detects broken connections by using ``MSG_PEEK``. + +* pylibrabbitmq: Now supports ``basic_get`` (Issue #97). + +* gevent: Now always uses the ``select`` polling backend. 
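+
+As a brief illustration of the common :exc:`~kombu.exceptions.KombuError`
+base class added in 2.1.4 above, kombu's own exceptions can now be caught
+in one place (a sketch; ``NotBoundError`` is one such subclass):
+
+.. code-block:: python
+
+    from kombu import Exchange
+    from kombu.exceptions import KombuError
+
+    try:
+        # deleting an unbound entity raises NotBoundError,
+        # which now inherits from KombuError
+        Exchange('tasks', 'direct').delete()
+    except KombuError as exc:
+        print('caught kombu error: %r' % (exc,))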
+
+* pika transport: Now works with pika 0.9.5 and 0.9.6dev.
+
+    The old pika transport (supporting 0.5.x) is now available
+    as the alias ``oldpika``.
+
+    (Note: terrible latency has been experienced with the new pika
+    versions, so this is still an experimental transport.)
+
+* Virtual transports: Can now set the polling interval via the
+  transport options (Issue #96).
+
+    Example::
+
+        >>> Connection('sqs://', transport_options={
+        ...     'polling_interval': 5.0})
+
+    The default interval is transport specific, but usually
+    1.0s (or 5.0s for the Django database transport, which
+    can also be set using the ``KOMBU_POLLING_INTERVAL`` setting).
+
+* Adds convenience function: :func:`kombu.common.eventloop`.
+
+.. _version-2.0.0:
+
+2.0.0
+=====
+:release-date: 2012-01-15 06:34 P.M GMT
+:release-by: Ask Solem
+
+.. _v200-important:
+
+Important Notes
+---------------
+
+.. _v200-python-compatibility:
+
+Python Compatibility
+~~~~~~~~~~~~~~~~~~~~
+
+* No longer supports Python 2.4.
+
+    Users of Python 2.4 can still use the 1.x series.
+
+    The 1.x series has entered bugfix-only maintenance mode, and will
+    stay that way as long as there is demand, and a willingness to
+    maintain it.
+
+.. _v200-new-transports:
+
+New Transports
+~~~~~~~~~~~~~~
+
+* ``django-kombu`` is now part of Kombu core.
+
+    The Django message transport uses the Django ORM to store messages.
+
+    It uses polling, with a default polling interval of 5 seconds.
+    The polling interval can be increased or decreased by configuring the
+    ``KOMBU_POLLING_INTERVAL`` Django setting, which is the polling
+    interval in seconds as an int or a float. Note that shorter polling
+    intervals can cause extreme strain on the database: if responsiveness
+    is needed you should consider switching to a non-polling transport.
+
+    To use it you must use the transport alias ``"django"``,
+    or give it as a URL::
+
+        django://
+
+    and then add ``kombu.transport.django`` to ``INSTALLED_APPS``, and
+    run ``manage.py syncdb`` to create the necessary database tables.
+
+    **Upgrading**
+
+    If you have previously used ``django-kombu``, then the entry
+    in ``INSTALLED_APPS`` must be changed from ``djkombu``
+    to ``kombu.transport.django``::
+
+        INSTALLED_APPS = (…,
+                          'kombu.transport.django')
+
+    There is no need to recreate the tables, as the old tables will
+    be fully compatible with the new version.
+
+* ``kombu-sqlalchemy`` is now part of Kombu core.
+
+    This change requires no code changes given that the
+    ``sqlalchemy`` transport alias is used.
+
+.. _v200-news:
+
+News
+----
+
+* :class:`kombu.mixins.ConsumerMixin` is a mixin class that lets you
+  easily write consumer programs and threads.
+
+    See :ref:`examples` and :ref:`guide-consumers`.
+
+* SQS Transport: Added support for SQS queue prefixes (Issue #84).
+
+    The queue prefix can be set using the transport option
+    ``queue_name_prefix``::
+
+        BrokerConnection('SQS://', transport_options={
+            'queue_name_prefix': 'myapp'})
+
+    Contributed by Nitzan Miron.
+
+* ``Producer.publish`` now supports automatic retry.
+
+    Retry is enabled by the ``retry`` argument, and retry options
+    are set by the ``retry_policy`` argument::
+
+        exchange = Exchange('foo')
+        producer.publish(message, exchange=exchange, retry=True,
+                         declare=[exchange], retry_policy={
+                             'interval_start': 1.0})
+
+    See :meth:`~kombu.Connection.ensure`
+    for a list of supported retry policy options.
+
+* ``Producer.publish`` now supports a ``declare`` keyword argument.
+ + This is a list of entities (:class:`Exchange`, or :class:`Queue`) + that should be declared before the message is published. + +.. _v200-fixes: + +Fixes +----- + +* Redis transport: Timeout was multiplied by 1000 seconds when using + ``select`` for event I/O (Issue #86). + +.. _version-1.5.1: + +1.5.1 +===== +:release-date: 2011-11-30 01:00 P.M GMT +:release-by: Ask Solem + +* Fixes issue with ``kombu.compat`` introduced in 1.5.0 (Issue #83). + +* Adds the ability to disable content_types in the serializer registry. + + Any message with a content type that is disabled will be refused. + One example would be to disable the Pickle serializer: + + >>> from kombu.serialization import registry + # by name + >>> registry.disable('pickle') + # or by mime-type. + >>> registry.disable('application/x-python-serialize') + +.. _version-1.5.0: + +1.5.0 +===== +:release-date: 2011-11-27 06:00 P.M GMT +:release-by: Ask Solem + +* kombu.pools: Fixed a bug resulting in resources not being properly released. + + This was caused by the use of ``__hash__`` to distinguish them. + +* Virtual transports: Dead-letter queue is now disabled by default. + + The dead-letter queue was enabled by default to help application + authors, but now that Kombu is stable it should be removed. + There are after all many cases where messages should just be dropped + when there are no queues to buffer them, and keeping them without + supporting automatic cleanup is rather considered a resource leak + than a feature. + + If wanted the dead-letter queue can still be enabled, by using + the ``deadletter_queue`` transport option:: + + >>> x = Connection('redis://', + ... transport_options={'deadletter_queue': 'ae.undeliver'}) + + In addition, an :class:`UndeliverableWarning` is now emitted when + the dead-letter queue is enabled and a message ends up there. + + Contributed by Ionel Maries Cristian. + +* MongoDB transport now supports Replicasets (Issue #81). + + Contributed by Ivan Metzlar. + +* The ``Connection.ensure`` methods now accepts a ``max_retries`` value + of 0. + + A value of 0 now means *do not retry*, which is distinct from :const:`None` + which means *retry indefinitely*. + + Contributed by Dan McGee. + +* SQS Transport: Now has a lowercase ``sqs`` alias, so that it can be + used with broker URLs (Issue #82). + + Fix contributed by Hong Minhee + +* SQS Transport: Fixes KeyError on message acknowledgements (Issue #73). + + The SQS transport now uses UUID's for delivery tags, rather than + a counter. + + Fix contributed by Brian Bernstein. + +* SQS Transport: Unicode related fixes (Issue #82). + + Fix contributed by Hong Minhee. + +* Redis version check could crash because of improper handling of types + (Issue #63). + +* Fixed error with `Resource.force_close_all` when resources + were not yet properly initialized (Issue #78). + +.. _version-1.4.3: + +1.4.3 +===== +:release-date: 2011-10-27 10:00 P.M BST +:release-by: Ask Solem + +* Fixes bug in ProducerPool where too many resources would be acquired. + +.. _version-1.4.2: + +1.4.2 +===== +:release-date: 2011-10-26 05:00 P.M BST +:release-by: Ask Solem + +* Eventio: Polling should ignore `errno.EINTR` + +* SQS: str.encode did only start accepting kwargs after Py2.7. + +* simple_task_queue example didn't run correctly (Issue #72). + + Fix contributed by Stefan Eletzhofer. + +* Empty messages would not raise an exception not able to be handled + by `on_decode_error` (Issue #72) + + Fix contributed by Christophe Chauvet. 
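+
+A small sketch of the ``max_retries=0`` semantics added to
+``Connection.ensure`` in 1.5.0 above (broker URL, routing key and message
+body are illustrative only):
+
+.. code-block:: python
+
+    from kombu import Connection, Producer
+
+    connection = Connection('amqp://guest:guest@localhost//')
+    producer = Producer(connection)
+
+    # max_retries=0 now means "do not retry at all",
+    # while None still means "retry forever".
+    publish = connection.ensure(producer, producer.publish, max_retries=0)
+    publish({'hello': 'world'}, routing_key='test')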
+
+* CouchDB: Properly authenticate if user/password set (Issue #70).
+
+    Fix contributed by Rafael Duran Castaneda.
+
+* Connection.Consumer had the wrong signature.
+
+    Fix contributed by Pavel Skvazh.
+
+.. _version-1.4.1:
+
+1.4.1
+=====
+:release-date: 2011-09-26 04:00 P.M BST
+:release-by: Ask Solem
+
+* 1.4.0 broke the producer pool, resulting in new connections being
+  established for every acquire.
+
+.. _version-1.4.0:
+
+1.4.0
+=====
+:release-date: 2011-09-22 05:00 P.M BST
+:release-by: Ask Solem
+
+* Adds module :mod:`kombu.mixins`.
+
+    This module contains a :class:`~kombu.mixins.ConsumerMixin` class
+    that can be used to easily implement a message consumer
+    thread that consumes messages from one or more
+    :class:`kombu.Consumer` instances.
+
+* New example: :ref:`task-queue-example`
+
+    Using the ``ConsumerMixin``, default channels and
+    the global connection pool to demonstrate new Kombu features.
+
+* MongoDB transport did not work with MongoDB >= 2.0 (Issue #66).
+
+    Fix contributed by James Turk.
+
+* Redis-py version check did not account for beta identifiers
+  in the version string.
+
+    Fix contributed by David Ziegler.
+
+* Producer and Consumer now accept a connection instance as the
+  first argument.
+
+    The connection's default channel will then be used.
+
+    In addition, shortcut methods have been added to Connection::
+
+        >>> connection.Producer(exchange)
+        >>> connection.Consumer(queues=..., callbacks=...)
+
+* Connection has acquired a ``connected`` attribute that
+  can be used to check if the connection instance has established
+  a connection.
+
+* ``ConnectionPool.acquire_channel`` now returns the connection's
+  default channel rather than establishing a new channel that
+  must be manually handled.
+
+* Added ``kombu.common.maybe_declare``.
+
+    ``maybe_declare(entity)`` declares an entity if it has
+    not previously been declared in the same process.
+
+* :func:`kombu.compat.entry_to_queue` has been moved to :mod:`kombu.common`.
+
+* New module :mod:`kombu.clocks` now contains an implementation
+  of Lamport's logical clock.
+
+.. _version-1.3.5:
+
+1.3.5
+=====
+:release-date: 2011-09-16 06:00 P.M BST
+:release-by: Ask Solem
+
+* Python 3: AMQP_PROTOCOL_HEADER must be bytes, not str.
+
+.. _version-1.3.4:
+
+1.3.4
+=====
+:release-date: 2011-09-16 06:00 P.M BST
+:release-by: Ask Solem
+
+* Fixes syntax error in pools.reset.
+
+.. _version-1.3.3:
+
+1.3.3
+=====
+:release-date: 2011-09-15 02:00 P.M BST
+:release-by: Ask Solem
+
+* pools.reset did not support after-fork arguments.
+
+.. _version-1.3.2:
+
+1.3.2
+=====
+:release-date: 2011-09-10 01:00 P.M BST
+:release-by: Mher Movsisyan
+
+* Broke Python 2.5 compatibility by importing ``parse_qsl`` from ``urlparse``.
+
+* Connection.default_channel is now closed when the connection is revived
+  after connection failures.
+
+* Pika: Channel now supports the ``connection.client`` attribute
+  as required by the simple interface.
+
+* pools.set_limit now raises an exception if the limit is lower
+  than the previous limit.
+
+* pools.set_limit no longer resets the pools.
+
+.. _version-1.3.1:
+
+1.3.1
+=====
+:release-date: 2011-10-07 03:00 P.M BST
+:release-by: Ask Solem
+
+* Last release broke after-fork pool reinitialization.
+
+* Producer/Consumer now have a ``connection`` attribute,
+  giving access to the :class:`Connection` of the
+  instance.
+
+* Pika: Channels now have access to the underlying
+  :class:`Connection` instance using ``channel.connection.client``.
+
+    This was previously required by the ``Simple`` classes and is now
+    also required by :class:`Consumer` and :class:`Producer`.
+
+* Connection.default_channel is now closed at object revival.
+
+* Adds kombu.clocks.LamportClock.
+
+* compat.entry_to_queue has been moved to the new module :mod:`kombu.common`.
+
+.. _version-1.3.0:
+
+1.3.0
+=====
+:release-date: 2011-10-05 01:00 P.M BST
+:release-by: Ask Solem
+
+* Broker connection info can now be specified using URLs.
+
+    The broker hostname can now be given as a URL of the format::
+
+        transport://user:password@hostname:port/virtual_host
+
+    For example, the default broker is expressed as::
+
+        >>> Connection('amqp://guest:guest@localhost:5672//')
+
+    Transport defaults to amqp, and is not required.
+    user, password, port and virtual_host are also not mandatory and
+    will default to the corresponding transport's defaults.
+
+    .. note::
+
+        Note that the path component (virtual_host) always starts with a
+        forward-slash. This is necessary to distinguish between the virtual
+        host '' (empty) and '/', which are both acceptable virtual host names.
+
+        A virtual host of '/' becomes::
+
+            amqp://guest:guest@localhost:5672//
+
+        and a virtual host of '' (empty) becomes::
+
+            amqp://guest:guest@localhost:5672/
+
+        So the leading slash in the path component is **always required**.
+
+* Now comes with default global connection and producer pools.
+
+    To acquire a connection using the connection parameters
+    from a :class:`Connection`::
+
+        >>> from kombu import Connection, connections
+        >>> connection = Connection('amqp://guest:guest@localhost//')
+        >>> with connections[connection].acquire(block=True):
+        ...     # do something with connection
+
+    To acquire a producer using the connection parameters
+    from a :class:`Connection`::
+
+        >>> from kombu import Connection, producers
+        >>> connection = Connection('amqp://guest:guest@localhost//')
+        >>> with producers[connection].acquire(block=True):
+        ...     producer.publish({'hello': 'world'}, exchange='hello')
+
+    Acquiring a producer will in turn also acquire a connection
+    from the associated pool in ``connections``, so the number
+    of producers is bound by the same limit as the number of connections.
+
+    The default limit of 100 connections per connection instance
+    can be changed by doing::
+
+        >>> from kombu import pools
+        >>> pools.set_limit(10)
+
+    The pools can also be forcefully closed by doing::
+
+        >>> from kombu import pools
+        >>> pools.reset()
+
+* SQS Transport: Persistence using SimpleDB is now disabled by default,
+  after reports of unstable SimpleDB connections leading to errors.
+
+* :class:`Producer` can now be used as a context manager.
+
+* ``Producer.__exit__`` now properly calls ``release`` instead of close.
+
+    The previous behavior would lead to a memory leak when using
+    the :class:`kombu.pools.ProducerPool`.
+
+* Now silences all exceptions from `import ctypes` to match the behaviour
+  of the standard Python uuid module, and to avoid passing on MemoryError
+  exceptions on SELinux-enabled systems (Issue #52 + Issue #53).
+
+* ``amqp`` is now an alias to the ``amqplib`` transport.
+
+* ``kombu.syn.detect_environment`` now returns 'default', 'eventlet', or
+  'gevent' depending on what monkey patches have been installed.
+
+* The serialization registry has a new attribute ``type_to_name``, so it is
+  possible to look up the serializer name by content type.
+
+* The exchange argument to ``Producer.publish`` can now be an
+  :class:`Exchange` instance.
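+
+    A minimal sketch (exchange and routing key names are illustrative):
+
+    .. code-block:: python
+
+        from kombu import Connection, Exchange, Producer
+
+        exchange = Exchange('tasks', 'direct')
+
+        with Connection('amqp://guest:guest@localhost//') as connection:
+            producer = Producer(connection.channel())
+            # pass the Exchange instance itself instead of its name
+            producer.publish({'do': 'work'}, exchange=exchange,
+                             routing_key='tasks')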
+ +* ``compat.Publisher`` now supports the ``channel`` keyword argument. + +* Acking a message on some transports could lead to :exc:`KeyError` being + raised (Issue #57). + +* Connection pool: Connections are no long instantiated when the pool is + created, but instantiated as needed instead. + +* Tests now pass on PyPy. + +* ``Connection.as_uri`` now includes the password if the keyword argument + ``include_password`` is set. + +* Virtual transports now comes with a default ``default_connection_params`` + attribute. + +.. _version-1.2.1: + +1.2.1 +===== +:release-date: 2011-07-29 12:52 P.M BST +:release-by: Ask Solem + +* Now depends on amqplib >= 1.0.0. + +* Redis: Now automatically deletes auto_delete queues at ``basic_cancel``. + +* ``serialization.unregister`` added so it is possible to remove unwanted + seralizers. + +* Fixes MemoryError while importing ctypes on SELinux (Issue #52). + +* ``Connection.autoretry`` is a version of ``ensure`` that works + with arbitrary functions (i.e. it does not need an associated object + that implements the ``revive`` method. + + Example usage: + + .. code-block:: python + + channel = connection.channel() + try: + ret, channel = connection.autoretry(send_messages, channel=channel) + finally: + channel.close() + +* ``ConnectionPool.acquire`` no longer force establishes the connection. + + The connection will be established as needed. + +* ``Connection.ensure`` now supports an ``on_revive`` callback + that is applied whenever the connection is re-established. + +* ``Consumer.consuming_from(queue)`` returns True if the Consumer is + consuming from ``queue``. + +* ``Consumer.cancel_by_queue`` did not remove the queue from ``queues``. + +* ``compat.ConsumerSet.add_queue_from_dict`` now automatically declared + the queue if ``auto_declare`` set. + +.. _version-1.2.0: + +1.2.0 +===== +:release-date: 2011-07-15 12:00 P.M BST +:release-by: Ask Solem + +* Virtual: Fixes cyclic reference in Channel.close (Issue #49). + +* Producer.publish: Can now set additional properties using keyword + arguments (Issue #48). + +* Adds Queue.no_ack option to control the no_ack option for individual queues. + +* Recent versions broke pylibrabbitmq support. + +* SimpleQueue and SimpleBuffer can now be used as contexts. + +* Test requirements specifies PyYAML==3.09 as 3.10 dropped Python 2.4 support + +* Now properly reports default values in Connection.info/.as_uri + +.. _version-1.1.6: + +1.1.6 +===== +:release-date: 2011-06-13 04:00 P.M BST +:release-by: Ask Solem + +* Redis: Fixes issue introduced in 1.1.4, where a redis connection + failure could leave consumer hanging forever. + +* SQS: Now supports fanout messaging by using SimpleDB to store routing + tables. + + This can be disabled by setting the `supports_fanout` transport option: + + >>> Connection(transport='SQS', + ... transport_options={'supports_fanout': False}) + +* SQS: Now properly deletes a message when a message is acked. + +* SQS: Can now set the Amazon AWS region, by using the ``region`` + transport option. + +* amqplib: Now uses `localhost` as default hostname instead of raising an + error. + +.. _version-1.1.5: + +1.1.5 +===== +:release-date: 2011-06-07 06:00 P.M BST +:release-by: Ask Solem + +* Fixes compatibility with redis-py 2.4.4. + +.. _version-1.1.4: + +1.1.4 +===== +:release-date: 2011-06-07 04:00 P.M BST +:release-by: Ask Solem + +* Redis transport: Now requires redis-py version 2.4.4 or later. + +* New Amazon SQS transport added. + + Usage: + + >>> conn = Connection(transport='SQS', + ... 
userid=aws_access_key_id, + ... password=aws_secret_access_key) + + The environment variables :envvar:`AWS_ACCESS_KEY_ID` and + :envvar:`AWS_SECRET_ACCESS_KEY` are also supported. + +* librabbitmq transport: Fixes default credentials support. + +* amqplib transport: Now supports `login_method` for SSL auth. + + :class:`Connection` now supports the `login_method` + keyword argument. + + Default `login_method` is ``AMQPLAIN``. + +.. _version-1.1.3: + +1.1.3 +===== +:release-date: 2011-04-21 04:00 P.M CEST +:release-by: Ask Solem + +* Redis: Consuming from multiple connections now works with Eventlet. + +* Redis: Can now perform channel operations while the channel is in + BRPOP/LISTEN mode (Issue #35). + + Also the async BRPOP now times out after 1 second, this means that + cancelling consuming from a queue/starting consuming from additional queues + has a latency of up to one second (BRPOP does not support subsecond + timeouts). + +* Virtual: Allow channel objects to be closed multiple times without error. + +* amqplib: ``AttributeError`` has been added to the list of known + connection related errors (:attr:`Connection.connection_errors`). + +* amqplib: Now converts :exc:`SSLError` timeout errors to + :exc:`socket.timeout` (http://bugs.python.org/issue10272) + +* Ensures cyclic references are destroyed when the connection is closed. + +.. _version-1.1.2: + +1.1.2 +===== +:release-date: 2011-04-06 04:00 P.M CEST +:release-by: Ask Solem + +* Redis: Fixes serious issue where messages could be lost. + + The issue could happen if the message exceeded a certain number + of kilobytes in size. + + It is recommended that all users of the Redis transport should + upgrade to this version, even if not currently experiencing any + issues. + +.. _version-1.1.1: + +1.1.1 +===== +:release-date: 2011-04-05 03:51 P.M CEST +:release-by: Ask Solem + +* 1.1.0 started using ``Queue.LifoQueue`` which is only available + in Python 2.6+ (Issue #33). We now ship with our own LifoQueue. + + +.. _version-1.1.0: + +1.1.0 +===== +:release-date: 2011-04-05 01:05 P.M CEST +:release-by: Ask Solem + +.. _v110-important: + +Important Notes +--------------- + +* Virtual transports: Message body is now base64 encoded by default + (Issue #27). + + This should solve problems sending binary data with virtual + transports. + + Message compatibility is handled by adding a ``body_encoding`` + property, so messages sent by older versions is compatible + with this release. However -- If you are accessing the messages + directly not using Kombu, then you have to respect + the ``body_encoding`` property. + + If you need to disable base64 encoding then you can do so + via the transport options:: + + Connection(transport='...', + transport_options={'body_encoding': None}) + + **For transport authors**: + + You don't have to change anything in your custom transports, + as this is handled automatically by the base class. + + If you want to use a different encoder you can do so by adding + a key to ``Channel.codecs``. Default encoding is specified + by the ``Channel.body_encoding`` attribute. + + A new codec must provide two methods: ``encode(data)`` and + ``decode(data)``. + +* ConnectionPool/ChannelPool/Resource: Setting ``limit=None`` (or 0) + now disables pool semantics, and will establish and close + the resource whenever acquired or released. + +* ConnectionPool/ChannelPool/Resource: Is now using a LIFO queue + instead of the previous FIFO behavior. + + This means that the last resource released will be the one + acquired next. I.e. 
if only a single thread is using the pool + this means only a single connection will ever be used. + +* Connection: Cloned connections did not inherit transport_options + (``__copy__``). + +* contrib/requirements is now located in the top directory + of the distribution. + +* MongoDB: Now supports authentication using the ``userid`` and ``password`` + arguments to :class:`Connection` (Issue #30). + +* Connection: Default autentication credentials are now delegated to + the individual transports. + + This means that the ``userid`` and ``password`` arguments to + Connection is no longer *guest/guest* by default. + + The amqplib and pika transports will still have the default + credentials. + +* :meth:`Consumer.__exit__` did not have the correct signature (Issue #32). + +* Channel objects now have a ``channel_id`` attribute. + +* MongoDB: Version sniffing broke with development versions of + mongod (Issue #29). + +* New environment variable :envvar:`KOMBU_LOG_CONNECTION` will now emit debug + log messages for connection related actions. + + :envvar:`KOMBU_LOG_DEBUG` will also enable :envvar:`KOMBU_LOG_CONNECTION`. + +.. _version-1.0.7: + +1.0.7 +===== +:release-date: 2011-03-28 05:45 P.M CEST +:release-by: Ask Solem + +* Now depends on anyjson 0.3.1 + + cjson is no longer a recommended json implementation, and anyjson + will now emit a deprecation warning if used. + +* Please note that the Pika backend only works with version 0.5.2. + + The latest version (0.9.x) drastically changed API, and it is not + compatible yet. + +* on_decode_error is now called for exceptions in message_to_python + (Issue #24). + +* Redis: did not respect QoS settings. + +* Redis: Creating a connection now ensures the connection is established. + + This means ``Connection.ensure_connection`` works properly with + Redis. + +* consumer_tag argument to ``Queue.consume`` can't be :const:`None` + (Issue #21). + + A None value is now automatically converted to empty string. + An empty string will make the server generate a unique tag. + +* Connection now supports a ``transport_options`` argument. + + This can be used to pass additional arguments to transports. + +* Pika: ``drain_events`` raised :exc:`socket.timeout` even if no timeout + set (Issue #8). + +.. version-1.0.6: + +1.0.6 +===== +:release-date: 2011-03-22 04:00 P.M CET +:release-by: Ask Solem + +* The ``delivery_mode`` aliases (persistent/transient) were not automatically + converted to integer, and would cause a crash if using the amqplib + transport. + +* Redis: The redis-py :exc:`InvalidData` exception suddenly changed name to + :exc:`DataError`. + +* The :envvar:`KOMBU_LOG_DEBUG` environment variable can now be set to log all + channel method calls. + + Support for the following environment variables have been added: + + * :envvar:`KOMBU_LOG_CHANNEL` will wrap channels in an object that + logs every method call. + + * :envvar:`KOMBU_LOG_DEBUG` both enables channel logging and configures the + root logger to emit messages to standard error. + + **Example Usage**:: + + $ KOMBU_LOG_DEBUG=1 python + >>> from kombu import Connection + >>> conn = Connection() + >>> channel = conn.channel() + Start from server, version: 8.0, properties: + {u'product': 'RabbitMQ',.............. } + Open OK! known_hosts [] + using channel_id: 1 + Channel open + >>> channel.queue_declare('myq', passive=True) + [Kombu channel:1] queue_declare('myq', passive=True) + (u'myq', 0, 1) + +.. 
_version-1.0.5: + +1.0.5 +===== +:release-date: 2011-03-17 04:00 P.M CET +:release-by: Ask Solem + +* Fixed memory leak when creating virtual channels. All virtual transports + affected (redis, mongodb, memory, django, sqlalchemy, couchdb, beanstalk). + +* Virtual Transports: Fixed potential race condition when acking messages. + + If you have been affected by this, the error would show itself as an + exception raised by the OrderedDict implementation. (``object no longer + exists``). + +* MongoDB transport requires the ``findandmodify`` command only available in + MongoDB 1.3+, so now raises an exception if connected to an incompatible + server version. + +* Virtual Transports: ``basic.cancel`` should not try to remove unknown + consumer tag. + +.. _version-1.0.4: + +1.0.4 +===== +:release-date: 2011-02-28 04:00 P.M CET +:release-by: Ask Solem + +* Added Transport.polling_interval + + Used by django-kombu to increase the time to sleep between SELECTs when + there are no messages in the queue. + + Users of django-kombu should upgrade to django-kombu v0.9.2. + +.. _version-1.0.3: + +1.0.3 +===== +:release-date: 2011-02-12 04:00 P.M CET +:release-by: Ask Solem + +* ConnectionPool: Re-connect if amqplib connection closed + +* Adds ``Queue.as_dict`` + ``Exchange.as_dict``. + +* Copyright headers updated to include 2011. + +.. _version-1.0.2: + +1.0.2 +===== +:release-date: 2011-01-31 10:45 P.M CET +:release-by: Ask Solem + +* amqplib: Message properties were not set properly. +* Ghettoq backend names are now automatically translated to the new names. + +.. _version-1.0.1: + +1.0.1 +===== +:release-date: 2011-01-28 12:00 P.M CET +:release-by: Ask Solem + +* Redis: Now works with Linux (epoll) + +.. _version-1.0.0: + +1.0.0 +===== +:release-date: 2011-01-27 12:00 P.M CET +:release-by: Ask Solem + +* Initial release + +.. _version-0.1.0: + +0.1.0 +===== +:release-date: 2010-07-22 04:20 P.M CET +:release-by: Ask Solem + +* Initial fork of carrot diff --git a/FAQ b/FAQ new file mode 100644 index 0000000..8275c82 --- /dev/null +++ b/FAQ @@ -0,0 +1,16 @@ +============================ + Frequently Asked Questions +============================ + +Questions +========= + +Q: Message.reject doesn't work? +-------------------------------------- +**Answer**: Earlier versions of RabbitMQ did not implement ``basic.reject``, +so make sure your version is recent enough to support it. + +Q: Message.requeue doesn't work? +-------------------------------------- + +**Answer**: See _`Message.reject doesn't work?` diff --git a/INSTALL b/INSTALL new file mode 100644 index 0000000..ac81882 --- /dev/null +++ b/INSTALL @@ -0,0 +1,21 @@ +Installation +============ + +You can install ``kombu`` either via the Python Package Index (PyPI) +or from source. + +To install using ``pip``,:: + + $ pip install kombu + + +To install using ``easy_install``,:: + + $ easy_install kombu + + +If you have downloaded a source tarball you can install it +by doing the following,:: + + $ python setup.py build + # python setup.py install # as root diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..2268b3b --- /dev/null +++ b/LICENSE @@ -0,0 +1,26 @@ +Copyright (c) 2012-2014 GoPivotal, Inc. All rights reserved. +Copyright (c) 2009-2012, Ask Solem & contributors. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Ask Solem nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Ask Solem OR CONTRIBUTORS +BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..35c0ac8 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,17 @@ +include AUTHORS +include Changelog +include FAQ +include INSTALL +include LICENSE +include MANIFEST.in +include README.rst +include README +include THANKS +include TODO +include setup.cfg +recursive-include extra * +recursive-include docs * +recursive-include kombu *.py +recursive-include requirements *.txt +recursive-include funtests *.py setup.cfg +recursive-include examples *.py diff --git a/PKG-INFO b/PKG-INFO new file mode 100644 index 0000000..88e69c4 --- /dev/null +++ b/PKG-INFO @@ -0,0 +1,355 @@ +Metadata-Version: 1.1 +Name: kombu +Version: 3.0.21 +Summary: Messaging library for Python +Home-page: http://kombu.readthedocs.org +Author: Ask Solem +Author-email: ask@celeryproject.org +License: UNKNOWN +Description: .. _kombu-index: + + ======================================== + kombu - Messaging library for Python + ======================================== + + :Version: 3.0.21 + + `Kombu` is a messaging library for Python. + + The aim of `Kombu` is to make messaging in Python as easy as possible by + providing an idiomatic high-level interface for the AMQ protocol, and also + provide proven and tested solutions to common messaging problems. + + `AMQP`_ is the Advanced Message Queuing Protocol, an open standard protocol + for message orientation, queuing, routing, reliability and security, + for which the `RabbitMQ`_ messaging server is the most popular implementation. + + Features + ======== + + * Allows application authors to support several message server + solutions by using pluggable transports. + + * AMQP transport using the `py-amqp`_ or `librabbitmq`_ client libraries. + + * High performance AMQP transport written in C - when using `librabbitmq`_ + + This is automatically enabled if librabbitmq is installed:: + + $ pip install librabbitmq + + * Virtual transports makes it really easy to add support for non-AMQP + transports. 
There is already built-in support for `Redis`_, + `Beanstalk`_, `Amazon SQS`_, `CouchDB`_, `MongoDB`_, `ZeroMQ`_, + `ZooKeeper`_, `SoftLayer MQ`_ and `Pyro`_. + + * You can also use the SQLAlchemy and Django ORM transports to + use a database as the broker. + + * In-memory transport for unit testing. + + * Supports automatic encoding, serialization and compression of message + payloads. + + * Consistent exception handling across transports. + + * The ability to ensure that an operation is performed by gracefully + handling connection and channel errors. + + * Several annoyances with `amqplib`_ has been fixed, like supporting + timeouts and the ability to wait for events on more than one channel. + + * Projects already using `carrot`_ can easily be ported by using + a compatibility layer. + + For an introduction to AMQP you should read the article `Rabbits and warrens`_, + and the `Wikipedia article about AMQP`_. + + .. _`RabbitMQ`: http://www.rabbitmq.com/ + .. _`AMQP`: http://amqp.org + .. _`py-amqp`: http://pypi.python.org/pypi/amqp/ + .. _`Redis`: http://code.google.com/p/redis/ + .. _`Amazon SQS`: http://aws.amazon.com/sqs/ + .. _`MongoDB`: http://www.mongodb.org/ + .. _`CouchDB`: http://couchdb.apache.org/ + .. _`ZeroMQ`: http://zeromq.org/ + .. _`Zookeeper`: https://zookeeper.apache.org/ + .. _`Beanstalk`: http://kr.github.com/beanstalkd/ + .. _`Rabbits and warrens`: http://blogs.digitar.com/jjww/2009/01/rabbits-and-warrens/ + .. _`amqplib`: http://barryp.org/software/py-amqplib/ + .. _`Wikipedia article about AMQP`: http://en.wikipedia.org/wiki/AMQP + .. _`carrot`: http://pypi.python.org/pypi/carrot/ + .. _`librabbitmq`: http://pypi.python.org/pypi/librabbitmq + .. _`Pyro`: http://pythonhosting.org/Pyro + .. _`SoftLayer MQ`: http://www.softlayer.com/services/additional/message-queue + + + .. _transport-comparison: + + Transport Comparison + ==================== + + +---------------+----------+------------+------------+---------------+ + | **Client** | **Type** | **Direct** | **Topic** | **Fanout** | + +---------------+----------+------------+------------+---------------+ + | *amqp* | Native | Yes | Yes | Yes | + +---------------+----------+------------+------------+---------------+ + | *redis* | Virtual | Yes | Yes | Yes (PUB/SUB) | + +---------------+----------+------------+------------+---------------+ + | *mongodb* | Virtual | Yes | Yes | Yes | + +---------------+----------+------------+------------+---------------+ + | *beanstalk* | Virtual | Yes | Yes [#f1]_ | No | + +---------------+----------+------------+------------+---------------+ + | *SQS* | Virtual | Yes | Yes [#f1]_ | Yes [#f2]_ | + +---------------+----------+------------+------------+---------------+ + | *couchdb* | Virtual | Yes | Yes [#f1]_ | No | + +---------------+----------+------------+------------+---------------+ + | *zookeeper* | Virtual | Yes | Yes [#f1]_ | No | + +---------------+----------+------------+------------+---------------+ + | *in-memory* | Virtual | Yes | Yes [#f1]_ | No | + +---------------+----------+------------+------------+---------------+ + | *django* | Virtual | Yes | Yes [#f1]_ | No | + +---------------+----------+------------+------------+---------------+ + | *sqlalchemy* | Virtual | Yes | Yes [#f1]_ | No | + +---------------+----------+------------+------------+---------------+ + | *SLMQ* | Virtual | Yes | Yes [#f1]_ | No | + +---------------+----------+------------+------------+---------------+ + + + .. 
[#f1] Declarations only kept in memory, so exchanges/queues + must be declared by all clients that needs them. + + .. [#f2] Fanout supported via storing routing tables in SimpleDB. + Disabled by default, but can be enabled by using the + ``supports_fanout`` transport option. + + + Documentation + ------------- + + Kombu is using Sphinx, and the latest documentation can be found here: + + http://kombu.readthedocs.org/ + + Quick overview + -------------- + + :: + + from kombu import Connection, Exchange, Queue + + media_exchange = Exchange('media', 'direct', durable=True) + video_queue = Queue('video', exchange=media_exchange, routing_key='video') + + def process_media(body, message): + print body + message.ack() + + # connections + with Connection('amqp://guest:guest@localhost//') as conn: + + # produce + producer = conn.Producer(serializer='json') + producer.publish({'name': '/tmp/lolcat1.avi', 'size': 1301013}, + exchange=media_exchange, routing_key='video', + declare=[video_queue]) + + # the declare above, makes sure the video queue is declared + # so that the messages can be delivered. + # It's a best practice in Kombu to have both publishers and + # consumers declare the queue. You can also declare the + # queue manually using: + # video_queue(conn).declare() + + # consume + with conn.Consumer(video_queue, callbacks=[process_media]) as consumer: + # Process messages and handle events on all channels + while True: + conn.drain_events() + + # Consume from several queues on the same channel: + video_queue = Queue('video', exchange=media_exchange, key='video') + image_queue = Queue('image', exchange=media_exchange, key='image') + + with connection.Consumer([video_queue, image_queue], + callbacks=[process_media]) as consumer: + while True: + connection.drain_events() + + + Or handle channels manually:: + + with connection.channel() as channel: + producer = Producer(channel, ...) + consumer = Producer(channel) + + + All objects can be used outside of with statements too, + just remember to close the objects after use:: + + from kombu import Connection, Consumer, Producer + + connection = Connection() + # ... + connection.release() + + consumer = Consumer(channel_or_connection, ...) + consumer.register_callback(my_callback) + consumer.consume() + # .... + consumer.cancel() + + + `Exchange` and `Queue` are simply declarations that can be pickled + and used in configuration files etc. + + They also support operations, but to do so they need to be bound + to a channel. + + Binding exchanges and queues to a connection will make it use + that connections default channel. + + :: + + >>> exchange = Exchange('tasks', 'direct') + + >>> connection = Connection() + >>> bound_exchange = exchange(connection) + >>> bound_exchange.delete() + + # the original exchange is not affected, and stays unbound. + >>> exchange.delete() + raise NotBoundError: Can't call delete on Exchange not bound to + a channel. + + Installation + ============ + + You can install `Kombu` either via the Python Package Index (PyPI) + or from source. + + To install using `pip`,:: + + $ pip install kombu + + To install using `easy_install`,:: + + $ easy_install kombu + + If you have downloaded a source tarball you can install it + by doing the following,:: + + $ python setup.py build + # python setup.py install # as root + + + Terminology + =========== + + There are some concepts you should be familiar with before starting: + + * Producers + + Producers sends messages to an exchange. + + * Exchanges + + Messages are sent to exchanges. 
+
+
+        Terminology
+        ===========
+
+        There are some concepts you should be familiar with before starting:
+
+            * Producers
+
+              Producers send messages to an exchange.
+
+            * Exchanges
+
+              Messages are sent to exchanges. Exchanges are named and can be
+              configured to use one of several routing algorithms. The exchange
+              routes the messages to consumers by matching the routing key in the
+              message with the routing key the consumer provides when binding to
+              the exchange.
+
+            * Consumers
+
+              Consumers declare a queue, bind it to an exchange and receive
+              messages from it.
+
+            * Queues
+
+              Queues receive messages sent to exchanges. The queues are declared
+              by consumers.
+
+            * Routing keys
+
+              Every message has a routing key. The interpretation of the routing
+              key depends on the exchange type. There are four default exchange
+              types defined by the AMQP standard, and vendors can define custom
+              types (see your vendor's manual for details).
+
+              These are the default exchange types defined by AMQP/0.8:
+
+                * Direct exchange
+
+                  Matches if the routing key property of the message and
+                  the `routing_key` attribute of the consumer are identical.
+
+                * Fan-out exchange
+
+                  Always matches, even if the binding does not have a routing
+                  key.
+
+                * Topic exchange
+
+                  Matches the routing key property of the message by a primitive
+                  pattern matching scheme. The message routing key then consists
+                  of words separated by dots (`"."`, like domain names), and
+                  two special characters are available: star (`"*"`) and hash
+                  (`"#"`). The star matches any word, and the hash matches
+                  zero or more words. For example `"*.stock.#"` matches the
+                  routing keys `"usd.stock"` and `"eur.stock.db"` but not
+                  `"stock.nasdaq"` (see the sketch after this list).
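+
+        As an illustration of that topic pattern, here is a minimal
+        sketch; the exchange, queue and payload names are invented for
+        the example::
+
+            from kombu import Connection, Exchange, Queue
+
+            stocks = Exchange('stocks', type='topic')
+
+            # Receives 'usd.stock' and 'eur.stock.db', but not 'stock.nasdaq'.
+            stock_queue = Queue('stock-reports', exchange=stocks,
+                                routing_key='*.stock.#')
+
+            with Connection('amqp://guest:guest@localhost//') as conn:
+                producer = conn.Producer(serializer='json')
+                producer.publish({'price': 103.2}, exchange=stocks,
+                                 routing_key='usd.stock',
+                                 declare=[stock_queue])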
+
+        Getting Help
+        ============
+
+        Mailing list
+        ------------
+
+        Join the `carrot-users`_ mailing list.
+
+        .. _`carrot-users`: http://groups.google.com/group/carrot-users/
+
+        Bug tracker
+        ===========
+
+        If you have any suggestions, bug reports or annoyances, please report
+        them to our issue tracker at http://github.com/celery/kombu/issues/
+
+        Contributing
+        ============
+
+        Development of `Kombu` happens at GitHub: http://github.com/celery/kombu
+
+        You are highly encouraged to participate in the development. If you don't
+        like GitHub (for some reason) you're welcome to send regular patches.
+
+        License
+        =======
+
+        This software is licensed under the `New BSD License`. See the `LICENSE`
+        file in the top distribution directory for the full license text.
+
+        .. image:: https://d2weczhvl823v0.cloudfront.net/celery/kombu/trend.png
+            :alt: Bitdeli badge
+            :target: https://bitdeli.com/free
+
+Platform: any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Programming Language :: Python :: Implementation :: Jython
+Classifier: Intended Audience :: Developers
+Classifier: Topic :: Communications
+Classifier: Topic :: System :: Distributed Computing
+Classifier: Topic :: System :: Networking
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..de4fa25
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,327 @@
+.. _kombu-index:
+
+========================================
+ kombu - Messaging library for Python
+========================================
+
+:Version: 3.0.21
+
+`Kombu` is a messaging library for Python.
+
+The aim of `Kombu` is to make messaging in Python as easy as possible by
+providing an idiomatic high-level interface for the AMQ protocol, and also
+provide proven and tested solutions to common messaging problems.
+
+`AMQP`_ is the Advanced Message Queuing Protocol, an open standard protocol
+for message orientation, queuing, routing, reliability and security,
+for which the `RabbitMQ`_ messaging server is the most popular implementation.
+
+Features
+========
+
+* Allows application authors to support several message server
+  solutions by using pluggable transports.
+
+    * AMQP transport using the `py-amqp`_ or `librabbitmq`_ client libraries.
+
+    * High performance AMQP transport written in C - when using `librabbitmq`_
+
+      This is automatically enabled if librabbitmq is installed::
+
+        $ pip install librabbitmq
+
+    * Virtual transports make it really easy to add support for non-AMQP
+      transports. There is already built-in support for `Redis`_,
+      `Beanstalk`_, `Amazon SQS`_, `CouchDB`_, `MongoDB`_, `ZeroMQ`_,
+      `ZooKeeper`_, `SoftLayer MQ`_ and `Pyro`_.
+
+    * You can also use the SQLAlchemy and Django ORM transports to
+      use a database as the broker.
+
+    * In-memory transport for unit testing.
+
+* Supports automatic encoding, serialization and compression of message
+  payloads (see the sketch after this list).
+
+* Consistent exception handling across transports.
+
+* The ability to ensure that an operation is performed by gracefully
+  handling connection and channel errors.
+
+* Several annoyances with `amqplib`_ have been fixed, like supporting
+  timeouts and the ability to wait for events on more than one channel.
+
+* Projects already using `carrot`_ can easily be ported by using
+  a compatibility layer.
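+
+A short sketch of the serialization, compression and error handling
+mentioned above; the connection URL and queue name are placeholders,
+and the queue is assumed to be declared elsewhere::
+
+    from kombu import Connection
+
+    with Connection('amqp://guest:guest@localhost//') as conn:
+        producer = conn.Producer(serializer='json')
+
+        # retry=True re-runs the publish, re-establishing the connection
+        # if a recoverable connection or channel error occurs.
+        producer.publish(
+            {'hello': 'world'},
+            routing_key='hello',   # routed via the default exchange
+            retry=True,
+            retry_policy={'max_retries': 3},
+            compression='zlib',    # body is compressed automatically
+        )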
+
+For an introduction to AMQP you should read the article `Rabbits and warrens`_,
+and the `Wikipedia article about AMQP`_.
+
+.. _`RabbitMQ`: http://www.rabbitmq.com/
+.. _`AMQP`: http://amqp.org
+.. _`py-amqp`: http://pypi.python.org/pypi/amqp/
+.. _`Redis`: http://code.google.com/p/redis/
+.. _`Amazon SQS`: http://aws.amazon.com/sqs/
+.. _`MongoDB`: http://www.mongodb.org/
+.. _`CouchDB`: http://couchdb.apache.org/
+.. _`ZeroMQ`: http://zeromq.org/
+.. _`Zookeeper`: https://zookeeper.apache.org/
+.. _`Beanstalk`: http://kr.github.com/beanstalkd/
+.. _`Rabbits and warrens`: http://blogs.digitar.com/jjww/2009/01/rabbits-and-warrens/
+.. _`amqplib`: http://barryp.org/software/py-amqplib/
+.. _`Wikipedia article about AMQP`: http://en.wikipedia.org/wiki/AMQP
+.. _`carrot`: http://pypi.python.org/pypi/carrot/
+.. _`librabbitmq`: http://pypi.python.org/pypi/librabbitmq
+.. _`Pyro`: http://pythonhosted.org/Pyro
+.. _`SoftLayer MQ`: http://www.softlayer.com/services/additional/message-queue
+
+
+.. _transport-comparison:
+
+Transport Comparison
+====================
+
++---------------+----------+------------+------------+---------------+
+| **Client**    | **Type** | **Direct** | **Topic**  | **Fanout**    |
++---------------+----------+------------+------------+---------------+
+| *amqp*        | Native   | Yes        | Yes        | Yes           |
++---------------+----------+------------+------------+---------------+
+| *redis*       | Virtual  | Yes        | Yes        | Yes (PUB/SUB) |
++---------------+----------+------------+------------+---------------+
+| *mongodb*     | Virtual  | Yes        | Yes        | Yes           |
++---------------+----------+------------+------------+---------------+
+| *beanstalk*   | Virtual  | Yes        | Yes [#f1]_ | No            |
++---------------+----------+------------+------------+---------------+
+| *SQS*         | Virtual  | Yes        | Yes [#f1]_ | Yes [#f2]_    |
++---------------+----------+------------+------------+---------------+
+| *couchdb*     | Virtual  | Yes        | Yes [#f1]_ | No            |
++---------------+----------+------------+------------+---------------+
+| *zookeeper*   | Virtual  | Yes        | Yes [#f1]_ | No            |
++---------------+----------+------------+------------+---------------+
+| *in-memory*   | Virtual  | Yes        | Yes [#f1]_ | No            |
++---------------+----------+------------+------------+---------------+
+| *django*      | Virtual  | Yes        | Yes [#f1]_ | No            |
++---------------+----------+------------+------------+---------------+
+| *sqlalchemy*  | Virtual  | Yes        | Yes [#f1]_ | No            |
++---------------+----------+------------+------------+---------------+
+| *SLMQ*        | Virtual  | Yes        | Yes [#f1]_ | No            |
++---------------+----------+------------+------------+---------------+
+
+
+.. [#f1] Declarations are only kept in memory, so exchanges/queues
+   must be declared by all clients that need them.
+
+.. [#f2] Fanout is supported via storing routing tables in SimpleDB.
+   Disabled by default, but can be enabled by using the
+   ``supports_fanout`` transport option.
+
+
+Documentation
+-------------
+
+Kombu uses Sphinx, and the latest documentation can be found here:
+
+    http://kombu.readthedocs.org/
+
+Quick overview
+--------------
+
+::
+
+    from kombu import Connection, Exchange, Queue
+
+    media_exchange = Exchange('media', 'direct', durable=True)
+    video_queue = Queue('video', exchange=media_exchange, routing_key='video')
+
+    def process_media(body, message):
+        print(body)
+        message.ack()
+
+    # connections
+    with Connection('amqp://guest:guest@localhost//') as conn:
+
+        # produce
+        producer = conn.Producer(serializer='json')
+        producer.publish({'name': '/tmp/lolcat1.avi', 'size': 1301013},
+                         exchange=media_exchange, routing_key='video',
+                         declare=[video_queue])
+
+        # the declare above makes sure the video queue is declared
+        # so that the messages can be delivered.
+        # It's a best practice in Kombu to have both publishers and
+        # consumers declare the queue. You can also declare the
+        # queue manually using:
+        #     video_queue(conn).declare()
+
+        # consume
+        with conn.Consumer(video_queue, callbacks=[process_media]) as consumer:
+            # Process messages and handle events on all channels
+            while True:
+                conn.drain_events()
+
+    # Consume from several queues on the same channel:
+    video_queue = Queue('video', exchange=media_exchange, routing_key='video')
+    image_queue = Queue('image', exchange=media_exchange, routing_key='image')
+
+    with conn.Consumer([video_queue, image_queue],
+                       callbacks=[process_media]) as consumer:
+        while True:
+            conn.drain_events()
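+
+``drain_events()`` blocks until something happens, so loops like the one
+above never return on an idle broker. One way (a sketch, not part of the
+original example) to keep the loop responsive is to poll with a timeout::
+
+    import socket
+
+    with conn.Consumer(video_queue, callbacks=[process_media]) as consumer:
+        while True:
+            try:
+                conn.drain_events(timeout=1)  # seconds
+            except socket.timeout:
+                pass  # no events in the window; loop and try again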
+
+
+Or handle channels manually::
+
+    with conn.channel() as channel:
+        producer = Producer(channel, ...)
+        consumer = Consumer(channel)
+
+
+All objects can be used outside of with statements too,
+just remember to close the objects after use::
+
+    from kombu import Connection, Consumer, Producer
+
+    connection = Connection()
+    # ...
+    connection.release()
+
+    consumer = Consumer(channel_or_connection, ...)
+    consumer.register_callback(my_callback)
+    consumer.consume()
+    # ....
+    consumer.cancel()
+
+
+`Exchange` and `Queue` are simply declarations that can be pickled
+and used in configuration files etc.
+
+They also support operations, but to do so they need to be bound
+to a channel.
+
+Binding exchanges and queues to a connection will make it use
+that connection's default channel.
+
+::
+
+    >>> exchange = Exchange('tasks', 'direct')
+
+    >>> connection = Connection()
+    >>> bound_exchange = exchange(connection)
+    >>> bound_exchange.delete()
+
+    # the original exchange is not affected, and stays unbound.
+    >>> exchange.delete()
+    Traceback (most recent call last):
+      ...
+    NotBoundError: Can't call delete on Exchange not bound to a channel.
+
+Installation
+============
+
+You can install `Kombu` either via the Python Package Index (PyPI)
+or from source.
+
+To install using `pip`::
+
+    $ pip install kombu
+
+To install using `easy_install`::
+
+    $ easy_install kombu
+
+If you have downloaded a source tarball you can install it
+by doing the following::
+
+    $ python setup.py build
+    # python setup.py install # as root
+
+
+Terminology
+===========
+
+There are some concepts you should be familiar with before starting:
+
+    * Producers
+
+      Producers send messages to an exchange.
+
+    * Exchanges
+
+      Messages are sent to exchanges. Exchanges are named and can be
+      configured to use one of several routing algorithms. The exchange
+      routes the messages to consumers by matching the routing key in the
+      message with the routing key the consumer provides when binding to
+      the exchange.
+
+    * Consumers
+
+      Consumers declare a queue, bind it to an exchange and receive
+      messages from it.
+
+    * Queues
+
+      Queues receive messages sent to exchanges. The queues are declared
+      by consumers.
+
+    * Routing keys
+
+      Every message has a routing key. The interpretation of the routing
+      key depends on the exchange type. There are four default exchange
+      types defined by the AMQP standard, and vendors can define custom
+      types (see your vendor's manual for details).
+
+      These are the default exchange types defined by AMQP/0.8:
+
+        * Direct exchange
+
+          Matches if the routing key property of the message and
+          the `routing_key` attribute of the consumer are identical.
+
+        * Fan-out exchange
+
+          Always matches, even if the binding does not have a routing
+          key.
+
+        * Topic exchange
+
+          Matches the routing key property of the message by a primitive
+          pattern matching scheme. The message routing key then consists
+          of words separated by dots (`"."`, like domain names), and
+          two special characters are available: star (`"*"`) and hash
+          (`"#"`). The star matches any word, and the hash matches
+          zero or more words. For example `"*.stock.#"` matches the
+          routing keys `"usd.stock"` and `"eur.stock.db"` but not
+          `"stock.nasdaq"`.
+
+Getting Help
+============
+
+Mailing list
+------------
+
+Join the `carrot-users`_ mailing list.
+
+.. _`carrot-users`: http://groups.google.com/group/carrot-users/
+
+Bug tracker
+===========
+
+If you have any suggestions, bug reports or annoyances, please report them
+to our issue tracker at http://github.com/celery/kombu/issues/
+
+Contributing
+============
+
+Development of `Kombu` happens at GitHub: http://github.com/celery/kombu
+
+You are highly encouraged to participate in the development. If you don't
+like GitHub (for some reason) you're welcome to send regular patches.
+
+License
+=======
+
+This software is licensed under the `New BSD License`. See the `LICENSE`
+file in the top distribution directory for the full license text.
+
+.. image:: https://d2weczhvl823v0.cloudfront.net/celery/kombu/trend.png
+    :alt: Bitdeli badge
+    :target: https://bitdeli.com/free
diff --git a/THANKS b/THANKS
new file mode 100644
index 0000000..85612f6
--- /dev/null
+++ b/THANKS
@@ -0,0 +1,32 @@
+========
+ THANKS
+========
+
+From ``carrot`` THANKS file
+===========================
+
+* Thanks to Barry Pederson for the py-amqplib library.
+* Thanks to Grégoire Cachet for bug reports.
+* Thanks to Martin Mahner for the Sphinx theme.
+* Thanks to jcater for bug reports.
+* Thanks to sebest for bug reports.
+* Thanks to greut for bug reports.
+
+From ``django-kombu`` THANKS file
+=================================
+
+* Thanks to Rajesh Dhawan and other authors of django-queue-service
+  for the database model implementation.
+  See http://code.google.com/p/django-queue-service/.
+
+From ``kombu-sqlalchemy`` THANKS file
+=====================================
+
+* Thanks to Rajesh Dhawan and other authors of django-queue-service
+  for the database model implementation.
+  See http://code.google.com/p/django-queue-service/.
+
+* Thanks to haridsv for the draft SQLAlchemy port (which can still
+  be found at http://github.com/haridsv/celery-alchemy-poc)
+
+
diff --git a/TODO b/TODO
new file mode 100644
index 0000000..6b9d9ea
--- /dev/null
+++ b/TODO
@@ -0,0 +1,2 @@
+Please see our Issue Tracker at GitHub:
+    http://github.com/celery/kombu/issues
diff --git a/docs/.static/.keep b/docs/.static/.keep
new file mode 100644
index 0000000..e69de29
diff --git a/docs/.templates/sidebarintro.html b/docs/.templates/sidebarintro.html
new file mode 100644
index 0000000..09d6a33
--- /dev/null
+++ b/docs/.templates/sidebarintro.html
@@ -0,0 +1,7 @@

+<h3>Kombu</h3>
+<p>
+  Kombu is a messaging library for Python.
+</p>

+ diff --git a/docs/.templates/sidebarlogo.html b/docs/.templates/sidebarlogo.html new file mode 100644 index 0000000..6c398ba --- /dev/null +++ b/docs/.templates/sidebarlogo.html @@ -0,0 +1,3 @@ + diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000..ef87680 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,75 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d .build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . + +.PHONY: help clean html web pickle htmlhelp latex changes linkcheck + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " changes to make an overview over all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + +clean: + -rm -rf .build/* + +html: + mkdir -p .build/html .build/doctrees + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) .build/html + @echo + @echo "Build finished. The HTML pages are in .build/html." + +pickle: + mkdir -p .build/pickle .build/doctrees + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) .build/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +web: pickle + +json: + mkdir -p .build/json .build/doctrees + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) .build/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + mkdir -p .build/htmlhelp .build/doctrees + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) .build/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in .build/htmlhelp." + +latex: + mkdir -p .build/latex .build/doctrees + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) .build/latex + @echo + @echo "Build finished; the LaTeX files are in .build/latex." + @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ + "run these through (pdf)latex." + +changes: + mkdir -p .build/changes .build/doctrees + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) .build/changes + @echo + @echo "The overview file is in .build/changes." + +linkcheck: + mkdir -p .build/linkcheck .build/doctrees + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) .build/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in .build/linkcheck/output.txt." diff --git a/docs/_ext/applyxrefs.py b/docs/_ext/applyxrefs.py new file mode 100644 index 0000000..93222a3 --- /dev/null +++ b/docs/_ext/applyxrefs.py @@ -0,0 +1,90 @@ +"""Adds xref targets to the top of files.""" + +import sys +import os + +testing = False + +DONT_TOUCH = ('./index.txt', ) + + +def target_name(fn): + if fn.endswith('.txt'): + fn = fn[:-4] + return '_' + fn.lstrip('./').replace('/', '-') + + +def process_file(fn, lines): + lines.insert(0, '\n') + lines.insert(0, '.. %s:\n' % target_name(fn)) + try: + f = open(fn, 'w') + except IOError: + print("Can't open %s for writing. Not touching it." % fn) + return + try: + f.writelines(lines) + except IOError: + print("Can't write to %s. Not touching it." % fn) + finally: + f.close() + + +def has_target(fn): + try: + f = open(fn, 'r') + except IOError: + print("Can't open %s. 
Not touching it." % fn) + return (True, None) + readok = True + try: + lines = f.readlines() + except IOError: + print("Can't read %s. Not touching it." % fn) + readok = False + finally: + f.close() + if not readok: + return (True, None) + + #print fn, len(lines) + if len(lines) < 1: + print("Not touching empty file %s." % fn) + return (True, None) + if lines[0].startswith('.. _'): + return (True, None) + return (False, lines) + + +def main(argv=None): + if argv is None: + argv = sys.argv + + if len(argv) == 1: + argv.extend('.') + + files = [] + for root in argv[1:]: + for (dirpath, dirnames, filenames) in os.walk(root): + files.extend([(dirpath, f) for f in filenames]) + files.sort() + files = [os.path.join(p, fn) for p, fn in files if fn.endswith('.txt')] + #print files + + for fn in files: + if fn in DONT_TOUCH: + print("Skipping blacklisted file %s." % fn) + continue + + target_found, lines = has_target(fn) + if not target_found: + if testing: + print '%s: %s' % (fn, lines[0]), + else: + print "Adding xref to %s" % fn + process_file(fn, lines) + else: + print "Skipping %s: already has a xref" % fn + +if __name__ == '__main__': + sys.exit(main()) diff --git a/docs/_ext/literals_to_xrefs.py b/docs/_ext/literals_to_xrefs.py new file mode 100644 index 0000000..d01a422 --- /dev/null +++ b/docs/_ext/literals_to_xrefs.py @@ -0,0 +1,180 @@ +""" +Runs through a reST file looking for old-style literals, and helps replace them +with new-style references. +""" + +import re +import sys +import shelve + +try: + input = input +except NameError: + input = raw_input # noqa + +refre = re.compile(r'``([^`\s]+?)``') + +ROLES = ( + 'attr', + 'class', + "djadmin", + 'data', + 'exc', + 'file', + 'func', + 'lookup', + 'meth', + 'mod', + "djadminopt", + "ref", + "setting", + "term", + "tfilter", + "ttag", + + # special + "skip", +) + +ALWAYS_SKIP = [ + "NULL", + "True", + "False", +] + + +def fixliterals(fname): + data = open(fname).read() + + last = 0 + new = [] + storage = shelve.open("/tmp/literals_to_xref.shelve") + lastvalues = storage.get("lastvalues", {}) + + for m in refre.finditer(data): + + new.append(data[last:m.start()]) + last = m.end() + + line_start = data.rfind("\n", 0, m.start()) + line_end = data.find("\n", m.end()) + prev_start = data.rfind("\n", 0, line_start) + next_end = data.find("\n", line_end + 1) + + # Skip always-skip stuff + if m.group(1) in ALWAYS_SKIP: + new.append(m.group(0)) + continue + + # skip when the next line is a title + next_line = data[m.end():next_end].strip() + if next_line[0] in "!-/:-@[-`{-~" and \ + all(c == next_line[0] for c in next_line): + new.append(m.group(0)) + continue + + sys.stdout.write("\n" + "-" * 80 + "\n") + sys.stdout.write(data[prev_start + 1:m.start()]) + sys.stdout.write(colorize(m.group(0), fg="red")) + sys.stdout.write(data[m.end():next_end]) + sys.stdout.write("\n\n") + + replace_type = None + while replace_type is None: + replace_type = input( + colorize("Replace role: ", fg="yellow")).strip().lower() + if replace_type and replace_type not in ROLES: + replace_type = None + + if replace_type == "": + new.append(m.group(0)) + continue + + if replace_type == "skip": + new.append(m.group(0)) + ALWAYS_SKIP.append(m.group(1)) + continue + + default = lastvalues.get(m.group(1), m.group(1)) + if default.endswith("()") and \ + replace_type in ("class", "func", "meth"): + default = default[:-2] + replace_value = input( + colorize("Text [", fg="yellow") + + default + + colorize("]: ", fg="yellow"), + ).strip() + if not replace_value: + replace_value = 
default + new.append(":%s:`%s`" % (replace_type, replace_value)) + lastvalues[m.group(1)] = replace_value + + new.append(data[last:]) + open(fname, "w").write("".join(new)) + + storage["lastvalues"] = lastvalues + storage.close() + + +def colorize(text='', opts=(), **kwargs): + """ + Returns your text, enclosed in ANSI graphics codes. + + Depends on the keyword arguments 'fg' and 'bg', and the contents of + the opts tuple/list. + + Returns the RESET code if no parameters are given. + + Valid colors: + 'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white' + + Valid options: + 'bold' + 'underscore' + 'blink' + 'reverse' + 'conceal' + 'noreset' - string will not be auto-terminated with the RESET code + + Examples: + colorize('hello', fg='red', bg='blue', opts=('blink',)) + colorize() + colorize('goodbye', opts=('underscore',)) + print colorize('first line', fg='red', opts=('noreset',)) + print 'this should be red too' + print colorize('and so should this') + print 'this should not be red' + """ + color_names = ('black', 'red', 'green', 'yellow', + 'blue', 'magenta', 'cyan', 'white') + foreground = dict([(color_names[x], '3%s' % x) for x in range(8)]) + background = dict([(color_names[x], '4%s' % x) for x in range(8)]) + + RESET = '0' + opt_dict = {'bold': '1', + 'underscore': '4', + 'blink': '5', + 'reverse': '7', + 'conceal': '8'} + + text = str(text) + code_list = [] + if text == '' and len(opts) == 1 and opts[0] == 'reset': + return '\x1b[%sm' % RESET + for k, v in kwargs.iteritems(): + if k == 'fg': + code_list.append(foreground[v]) + elif k == 'bg': + code_list.append(background[v]) + for o in opts: + if o in opt_dict: + code_list.append(opt_dict[o]) + if 'noreset' not in opts: + text = text + '\x1b[%sm' % RESET + return ('\x1b[%sm' % ';'.join(code_list)) + text + +if __name__ == '__main__': + try: + fixliterals(sys.argv[1]) + except (KeyboardInterrupt, SystemExit): + print diff --git a/docs/_theme/celery/static/celery.css_t b/docs/_theme/celery/static/celery.css_t new file mode 100644 index 0000000..4274f31 --- /dev/null +++ b/docs/_theme/celery/static/celery.css_t @@ -0,0 +1,394 @@ +/* + * celery.css_t + * ~~~~~~~~~~~~ + * + * :copyright: Copyright 2010 by Armin Ronacher. + * :license: BSD, see LICENSE for details. 
+ */ + +{% set page_width = 940 %} +{% set sidebar_width = 220 %} +{% set body_font_stack = 'Optima, Segoe, "Segoe UI", Candara, Calibri, Arial, sans-serif' %} +{% set headline_font_stack = 'Futura, "Trebuchet MS", Arial, sans-serif' %} +{% set code_font_stack = "'Consolas', 'Menlo', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace" %} + +@import url("basic.css"); + +/* -- page layout ----------------------------------------------------------- */ + +body { + align: left; + font-family: {{ body_font_stack }}; + font-size: 17px; + background-color: white; + color: #000; + margin: 30px 0 0 0; + padding: 0; +} + +div.document { + width: {{ page_width }}px; + margin: 0 auto; +} + +div.related { + width: {{ page_width - 20 }}px; + padding: 5px 10px; + background: #F2FCEE; + margin: 15px auto 15px auto; +} + +div.documentwrapper { + float: left; + width: 100%; +} + +div.bodywrapper { + margin: 0 0 0 {{ sidebar_width }}px; +} + +div.sphinxsidebar { + width: {{ sidebar_width }}px; +} + +hr { + border: 1px solid #B1B4B6; +} + +div.body { + background-color: #ffffff; + color: #3E4349; + padding: 0 30px 0 30px; +} + +img.celerylogo { + padding: 0 0 10px 10px; + float: right; +} + +div.footer { + width: {{ page_width - 15 }}px; + margin: 10px auto 30px auto; + padding-right: 15px; + font-size: 14px; + color: #888; + text-align: right; +} + +div.footer a { + color: #888; +} + +div.sphinxsidebar a { + color: #444; + text-decoration: none; + border-bottom: 1px dashed #DCF0D5; +} + +div.sphinxsidebar a:hover { + border-bottom: 1px solid #999; +} + +div.sphinxsidebar { + font-size: 14px; + line-height: 1.5; +} + +div.sphinxsidebarwrapper { + padding: 7px 10px; +} + +div.sphinxsidebarwrapper p.logo { + padding: 0 0 20px 0; + margin: 0; +} + +div.sphinxsidebar h3, +div.sphinxsidebar h4 { + font-family: {{ headline_font_stack }}; + color: #444; + font-size: 24px; + font-weight: normal; + margin: 0 0 5px 0; + padding: 0; +} + +div.sphinxsidebar h4 { + font-size: 20px; +} + +div.sphinxsidebar h3 a { + color: #444; +} + +div.sphinxsidebar p.logo a, +div.sphinxsidebar h3 a, +div.sphinxsidebar p.logo a:hover, +div.sphinxsidebar h3 a:hover { + border: none; +} + +div.sphinxsidebar p { + color: #555; + margin: 10px 0; +} + +div.sphinxsidebar ul { + margin: 10px 0; + padding: 0; + color: #000; +} + +div.sphinxsidebar input { + border: 1px solid #ccc; + font-family: {{ body_font_stack }}; + font-size: 1em; +} + +/* -- body styles ----------------------------------------------------------- */ + +a { + color: #348613; + text-decoration: underline; +} + +a:hover { + color: #59B833; + text-decoration: underline; +} + +div.body h1, +div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + font-family: {{ headline_font_stack }}; + font-weight: normal; + margin: 30px 0px 10px 0px; + padding: 0; +} + +div.body h1 { margin-top: 0; padding-top: 0; font-size: 200%; } +div.body h2 { font-size: 180%; } +div.body h3 { font-size: 150%; } +div.body h4 { font-size: 130%; } +div.body h5 { font-size: 100%; } +div.body h6 { font-size: 100%; } + +div.body h1 a.toc-backref, +div.body h2 a.toc-backref, +div.body h3 a.toc-backref, +div.body h4 a.toc-backref, +div.body h5 a.toc-backref, +div.body h6 a.toc-backref { + color: inherit!important; + text-decoration: none; +} + +a.headerlink { + color: #ddd; + padding: 0 4px; + text-decoration: none; +} + +a.headerlink:hover { + color: #444; + background: #eaeaea; +} + +div.body p, div.body dd, div.body li { + line-height: 1.4em; +} + +div.admonition { + background: #fafafa; 
+ margin: 20px -30px; + padding: 10px 30px; + border-top: 1px solid #ccc; + border-bottom: 1px solid #ccc; +} + +div.admonition p.admonition-title { + font-family: {{ headline_font_stack }}; + font-weight: normal; + font-size: 24px; + margin: 0 0 10px 0; + padding: 0; + line-height: 1; +} + +div.admonition p.last { + margin-bottom: 0; +} + +div.highlight{ + background-color: white; +} + +dt:target, .highlight { + background: #FAF3E8; +} + +div.note { + background-color: #eee; + border: 1px solid #ccc; +} + +div.seealso { + background-color: #ffc; + border: 1px solid #ff6; +} + +div.topic { + background-color: #eee; +} + +div.warning { + background-color: #ffe4e4; + border: 1px solid #f66; +} + +p.admonition-title { + display: inline; +} + +p.admonition-title:after { + content: ":"; +} + +pre, tt { + font-family: {{ code_font_stack }}; + font-size: 0.9em; +} + +img.screenshot { +} + +tt.descname, tt.descclassname { + font-size: 0.95em; +} + +tt.descname { + padding-right: 0.08em; +} + +img.screenshot { + -moz-box-shadow: 2px 2px 4px #eee; + -webkit-box-shadow: 2px 2px 4px #eee; + box-shadow: 2px 2px 4px #eee; +} + +table.docutils { + border: 1px solid #888; + -moz-box-shadow: 2px 2px 4px #eee; + -webkit-box-shadow: 2px 2px 4px #eee; + box-shadow: 2px 2px 4px #eee; +} + +table.docutils td, table.docutils th { + border: 1px solid #888; + padding: 0.25em 0.7em; +} + +table.field-list, table.footnote { + border: none; + -moz-box-shadow: none; + -webkit-box-shadow: none; + box-shadow: none; +} + +table.footnote { + margin: 15px 0; + width: 100%; + border: 1px solid #eee; + background: #fdfdfd; + font-size: 0.9em; +} + +table.footnote + table.footnote { + margin-top: -15px; + border-top: none; +} + +table.field-list th { + padding: 0 0.8em 0 0; +} + +table.field-list td { + padding: 0; +} + +table.footnote td.label { + width: 0px; + padding: 0.3em 0 0.3em 0.5em; +} + +table.footnote td { + padding: 0.3em 0.5em; +} + +dl { + margin: 0; + padding: 0; +} + +dl dd { + margin-left: 30px; +} + +blockquote { + margin: 0 0 0 30px; + padding: 0; +} + +ul { + margin: 10px 0 10px 30px; + padding: 0; +} + +pre { + background: #F0FFEB; + padding: 7px 10px; + margin: 15px 0; + border: 1px solid #C7ECB8; + border-radius: 2px; + -moz-border-radius: 2px; + -webkit-border-radius: 2px; + line-height: 1.3em; +} + +tt { + background: #F0FFEB; + color: #222; + /* padding: 1px 2px; */ +} + +tt.xref, a tt { + background: #F0FFEB; + border-bottom: 1px solid white; +} + +a.reference { + text-decoration: none; + border-bottom: 1px dashed #DCF0D5; +} + +a.reference:hover { + border-bottom: 1px solid #6D4100; +} + +a.footnote-reference { + text-decoration: none; + font-size: 0.7em; + vertical-align: top; + border-bottom: 1px dashed #DCF0D5; +} + +a.footnote-reference:hover { + border-bottom: 1px solid #6D4100; +} + +a:hover tt { + background: #EEE; +} diff --git a/docs/_theme/celery/theme.conf b/docs/_theme/celery/theme.conf new file mode 100644 index 0000000..9ad052c --- /dev/null +++ b/docs/_theme/celery/theme.conf @@ -0,0 +1,5 @@ +[theme] +inherit = basic +stylesheet = celery.css + +[options] diff --git a/docs/changelog.rst b/docs/changelog.rst new file mode 100644 index 0000000..28c6107 --- /dev/null +++ b/docs/changelog.rst @@ -0,0 +1,3012 @@ +.. _changelog: + +================ + Change history +================ + +.. _version-3.0.21: + +3.0.21 +====== +:release-date: 2014-07-07 02:00 P.M UTC +:release-by: Ask Solem + +- Fixed remaining bug in ``maybe_declare`` for ``auto_delete`` exchanges. 
+ + Fix contributed by Roger Hu. + +- MongoDB: Creating a channel now properly evaluates a connection (Issue #363). + + Fix contributed by Len Buckens. + +.. _version-3.0.20: + +3.0.20 +====== +:release-date: 2014-06-24 02:30 P.M UTC +:release-by: Ask Solem + +- Reverts change in 3.0.17 where ``maybe_declare`` caches the declaration + of auto_delete queues and exchanges. + + Fix contributed by Roger Hu. + +- Redis: Fixed race condition when using gevent and the channel is closed. + + Fix contributed by Andrew Rodionoff. + +.. _version-3.0.19: + +3.0.19 +====== +:release-date: 2014-06-09 03:10 P.M UTC +:release-by: Ask Solem + +- The wheel distribution did not support Python 2.6 by failing to list + the extra dependencies required. + +- Durable and auto_delete queues/exchanges can be be cached using + ``maybe_declare``. + +.. _version-3.0.18: + +3.0.18 +====== +:release-date: 2014-06-02 06:00 P.M UTC +:release-by: Ask Solem + +- A typo introduced in 3.0.17 caused kombu.async.hub to crash (Issue #360). + +.. _version-3.0.17: + +3.0.17 +====== +:release-date: 2014-06-02 05:00 P.M UTC +:release-by: Ask Solem + +- ``kombu[librabbitmq]`` now depends on librabbitmq 1.5.2. + +- Async: Event loop now selectively removes file descriptors for the mode + it failed in, and keeps others (e.g read vs write). + + Fix contributed by Roger Hu. + +- CouchDB: Now works without userid set. + + Fix contributed by Latitia M. Haskins. + +- SQLAlchemy: Now supports recovery from connection errors. + + Contributed by Felix Schwarz. + +- Redis: Restore at shutdown now works when ack emulation is disabled. + +- :func:`kombu.common.eventloop` accidentally swallowed socket errors. + +- Adds :func:`kombu.utils.url.sanitize_url` + +.. _version-3.0.16: + +3.0.16 +====== +:release-date: 2014-05-06 01:00 P.M UTC +:release-by: Ask Solem + +- ``kombu[librabbitmq]`` now depends on librabbitmq 1.5.1. + +- Redis: Fixes ``TypeError`` problem in ``unregister`` (Issue #342). + + Fix contributed by Tobias Schottdorf. + +- Tests: Some unit tests accidentally required the `redis-py` library. + + Fix contributed by Randy Barlow. + +- librabbitmq: Would crash when using an older version of :mod:`librabbitmq`, + now emits warning instead. + +.. _version-3.0.15: + +3.0.15 +====== +:release-date: 2014-04-15 09:00 P.M UTC +:release-by: Ask Solem + +- Now depends on :mod:`amqp` 1.4.5. + +- RabbitMQ 3.3 changes QoS semantics (Issue #339). + + See the RabbitMQ release notes here: + http://www.rabbitmq.com/blog/2014/04/02/breaking-things-with-rabbitmq-3-3/ + + A new connection property has been added that can be used to detect + whether the remote server is using this new QoS behavior:: + + >>> Connection('amqp://').qos_behavior_matches_spec + False + + so if your application depends on the old semantics you can + use this to set the ``apply_global`` flag appropriately:: + + def update_prefetch_count(channel, new_value): + channel.basic_qos( + 0, new_value, + not channel.connection.client.qos_behavior_matches_spec, + ) + +- Users of :mod:`librabbitmq` is encouraged to upgrade to librabbitmq 1.5.0. + + The ``kombu[librabbitmq]`` extra has been updated to depend on this + version. + +- Pools: Now takes transport options into account when comparing connections + (Issue #333). + +- MongoDB: Fixes Python 3 compatibility. + +- Async: select: Ignore socket errors when attempting to unregister handles + from the loop. 
+ +- Pidbox: Can now be configured to use a serializer other than json, + but specifying a serializer argument to :class:`~kombu.pidbox.Mailbox`. + + Contributed by Dmitry Malinovsky. + +- Message decompression now works with Python 3. + + Fix contributed by Adam Gaca. + +.. _version-3.0.14: + +3.0.14 +====== +:release-date: 2014-03-19 07:00 P.M UTC +:release-by: Ask Solem + +- **MongoDB**: Now endures a connection failover (Issue #123). + + Fix contributed by Alex Koshelev. + +- **MongoDB**: Fixed ``KeyError`` when a replica set member is removed. + + Also fixes celery#971 and celery/#898. + + Fix contributed by Alex Koshelev. + +- **MongoDB**: Fixed MongoDB broadcast cursor re-initialization bug. + + Fix contributed by Alex Koshelev. + +- **Async**: Fixed bug in lax semaphore implementation where in + some usage patterns the limit was not honored correctly. + + Fix contributed by Ionel Cristian Mărieș. + +- **Redis**: Fixed problem with fanout when using Python 3 (Issue #324). + +- **Redis**: Fixed ``AttributeError`` from attempting to close a non-existing + connection (Issue #320). + +.. _version-3.0.13: + +3.0.13 +====== +:release-date: 2014-03-03 04:00 P.M UTC +:release-by: Ask Solem + +- Redis: Fixed serious race condition that could lead to data loss. + + The delivery tags were accidentally set to be an incremental number + local to the channel, but the delivery tags need to be globally + unique so that a message can not overwrite an older message + in the backup store. + + This change is not backwards incompatible and you are encouraged + to update all your system using a previous version as soon as possible. + +- Now depends on :mod:`amqp` 1.4.4. + +- Pidbox: Now makes sure message encoding errors are handled by default, + so that a custom error handler does not need to be specified. + +- Redis: The fanout exchange can now use AMQP patterns to route and filter + messages. + + This change is backwards incompatible and must be enabled with + the ``fanout_patterns`` transport option:: + + >>> conn = kombu.Connection('redis://', transport_options={ + ... 'fanout_patterns': True, + ... }) + + When enabled the exchange will work like an amqp topic exchange + if the binding key is a pattern. + + This is planned to be default behavior in the future. + +- Redis: Fixed ``cycle`` no such attribute error. + +.. _version-3.0.12: + +3.0.12 +====== +:release-date: 2014-02-09 03:50 P.M UTC +:release-by: Ask Solem + +- Now depends on :mod:`amqp` 1.4.3. + +- Fixes Python 3.4 logging incompatibility (Issue #311). + +- Redis: Now properly handles unknown pub/sub messages. + + Fix contributed by Sam Stavinoha. + +- amqplib: Fixed bug where more bytes were requested from the socket + than necessary. + + Fix contributed by Ionel Cristian Mărieș. + +.. _version-3.0.11: + +3.0.11 +====== +:release-date: 2014-02-03 05:00 P.M UTC +:release-by: Ask Solem + +- Now depends on :mod:`amqp` 1.4.2. + +- Now always trusts messages of type `application/data` and `application/text` + or which have an unspecified content type (Issue #306). + +- Compression errors are now handled as decode errors and will trigger + the ``Consumer.on_decode_error`` callback if specified. + +- New ``kombu.Connection.get_heartbeat_interval()`` method that can be + used to access the negotiated heartbeat value. + +- `kombu.common.oid_for` no longer uses the MAC address of the host, but + instead uses a process-wide UUID4 as a node id. + + This avoids a call to `uuid.getnode()` at module scope. 
+ +- Hub.add: Now normalizes registered fileno. + + Contributed by Ionel Cristian Mărieș. + +- SQS: Fixed bug where the prefetch count limit was not respected. + +.. _version-3.0.10: + +3.0.10 +====== +:release-date: 2014-01-17 05:40 P.M UTC +:release-by: Ask Solem + +- Now depends on :mod:`amqp` 1.4.1. + +- ``maybe_declare`` now raises a "recoverable connection error" if + the channel is disconnected instead of a :exc:`ChannelError` so that + the operation can be retried. + +- Redis: ``Consumer.cancel()`` is now thread safe. + + This fixes an issue when using gevent/eventlet and a + message is handled after the consumer is cancelled resulting + in a "message for queue without consumers" error. + +- Retry operations would not always respect the interval_start + value when calculating the time to sleep for (Issue #303). + + Fix contributed by Antoine Legrand. + +- Timer: Fixed "unhashable type" error on Python 3. + +- Hub: Do not attempt to unregister operations on an already closed + poller instance. + +.. _version-3.0.9: + +3.0.9 +===== +:release-date: 2014-01-13 05:30 P.M UTC +:release-by: Ask Solem + +- Now depends on :mod:`amqp` 1.4.0. + +- Redis: Basic cancel for fanout based queues now sends a corresponding + ``UNSUBSCRIBE`` command to the server. + + This fixes an issue with pidbox where reply messages could be received + after the consumer was cancelled, giving the ``"message to queue without + consumers"`` error. + +- MongoDB: Improved connection string and options handling + (Issue #266 + Issue #120). + + Contributed by Alex Koshelev. + +- SQS: Limit the number of messages when receiving in batch to 10. + + This is a hard limit enforced by Amazon so the sqs transport + must not exceeed this value. + + Fix contributed by Eric Reynolds. + +- ConsumerMixin: ``consume`` now checks heartbeat every time the + socket times out. + + Contributed by Dustin J. Mitchell. + +- Retry Policy: A max retries of 0 did not retry forever. + + Fix contributed by Antoine Legrand. + +- Simple: If passing a Queue object the simple utils will now take + default routing key from that queue. + + Contributed by Fernando Jorge Mota. + +- ``repr(producer)`` no longer evaluates the underlying channnel. + +- Redis: The map of Redis error classes are now exposed at the module level + using the :func:`kombu.transport.redis.get_redis_error_classes` function. + +- Async: ``Hub.close`` now sets ``.poller`` to None. + +.. _version-3.0.8: + +3.0.8 +===== +:release-date: 2013-12-16 05:00 P.M UTC +:release-by: Ask Solem + +- Serializer: loads and dumps now wraps exceptions raised into + :exc:`~kombu.exceptions.DecodeError` and + :exc:`kombu.exceptions.EncodeError` respectively. + + Contributed by Ionel Cristian Maries + +- Redis: Would attempt to read from the wrong connection if a select/epoll/kqueue + exception event happened. + + Fix contributed by Michael Nelson. + +- Redis: Disabling ack emulation now works properly. + + Fix contributed by Michael Nelson. + +- Redis: :exc:`IOError` and :exc:`OSError` are now treated as recoverable + connection errors. + +- SQS: Improved performance by reading messages in bulk. + + Contributed by Matt Wise. + +- Connection Pool: Attempting to acquire from a closed pool will now + raise :class:`RuntimeError`. + +.. _version-3.0.7: + +3.0.7 +===== +:release-date: 2013-12-02 04:00 P.M UTC +:release-by: Ask Solem + +- Fixes Python 2.6 compatibility. + +- Redis: Fixes 'bad file descriptor' issue. + +.. 
_version-3.0.6: + +3.0.6 +===== +:release-date: 2013-11-21 04:50 P.M UTC +:release-by: Ask Solem + +- Timer: No longer attempts to hash keyword arguments (Issue #275). + +- Async: Did not account for the long type for file descriptors. + + Fix contributed by Fabrice Rabaute. + +- PyPy: kqueue support was broken. + +- Redis: Bad pub/sub payloads no longer crashes the consumer. + +- Redis: Unix socket URLs can now specify a virtual host by including + it as a query parameter. + + Example URL specifying a virtual host using database number 3:: + + redis+socket:///tmp/redis.sock?virtual_host=3 + +- ``kombu.VERSION`` is now a named tuple. + +.. _version-3.0.5: + +3.0.5 +===== +:release-date: 2013-11-15 11:00 P.M UTC +:release-by: Ask Solem + +- Now depends on :mod:`amqp` 1.3.3. + +- Redis: Fixed Python 3 compatibility problem (Issue #270). + +- MongoDB: Fixed problem with URL parsing when authentication used. + + Fix contributed by dongweiming. + +- pyamqp: Fixed small issue when publishing the message and + the property dictionary was set to None. + + Fix contributed by Victor Garcia. + +- Fixed problem in ``repr(LaxBoundedSemaphore)``. + + Fix contributed by Antoine Legrand. + +- Tests now passing on Python 3.3. + +.. _version-3.0.4: + +3.0.4 +===== +:release-date: 2013-11-08 01:00 P.M UTC +:release-by: Ask Solem + +- common.QoS: ``decrement_eventually`` now makes sure the value + does not go below 1 if a prefetch count is enabled. + +.. _version-3.0.3: + +3.0.3 +===== +:release-date: 2013-11-04 03:00 P.M UTC +:release-by: Ask Solem + +- SQS: Properly reverted patch that caused delays between messages. + + Contributed by James Saryerwinnie + +- select: Clear all registerd fds on poller.cloe + +- Eventloop: unregister if EBADF raised. + +.. _version-3.0.2: + +3.0.2 +===== +:release-date: 2013-10-29 02:00 P.M UTC +:release-by: Ask Solem + +- Now depends on :mod:`amqp` version 1.3.2. + +- select: Fixed problem where unregister did not properly remove + the fd. + +.. _version-3.0.1: + +3.0.1 +===== +:release-date: 2013-10-24 04:00 P.M UTC +:release-by: Ask Solem + +- Now depends on :mod:`amqp` version 1.3.1. + +- Redis: New option ``fanout_keyprefix`` + + This transport option is recommended for all users as it ensures + that broadcast (fanout) messages sent is only seen by the current + virtual host:: + + Connection('redis://', transport_options={'fanout_keyprefix': True}) + + However, enabling this means that you cannot send or receive messages + from older Kombu versions so make sure all of your participants + are upgraded and have the transport option enabled. + + This will be the default behavior in Kombu 4.0. + +- Distribution: Removed file ``requirements/py25.txt``. + +- MongoDB: Now disables ``auto_start_request``. + +- MongoDB: Enables ``use_greenlets`` if eventlet/gevent used. + +- Pidbox: Fixes problem where expires header was None, + which is a value not supported by the amq protocol. + +- ConsumerMixin: New ``consumer_context`` method for starting + the consumer without draining events. + +.. _version-3.0.0: + +3.0.0 +===== +:release-date: 2013-10-14 04:00 P.M BST +:release-by: Ask Solem + +- Now depends on :mod:`amqp` version 1.3. + +- No longer supports Python 2.5 + + The minimum Python version supported is now Python 2.6.0 for Python2, + and Python 3.3 for Python3. + +- Dual codebase supporting both Python 2 and 3. + + No longer using ``2to3``, making it easier to maintain support for + both versions. + +- pickle, yaml and msgpack deserialization is now disabled by default. 
+ + This means that Kombu will by default refuse to handle any content type other + than json. + + Pickle is known to be a security concern as it will happily + load any object that is embedded in a pickle payload, and payloads + can be crafted to do almost anything you want. The default + serializer in Kombu is json but it also supports a number + of other serialization formats that it will evaluate if received: + including pickle. + + It was always assumed that users were educated about the security + implications of pickle, but in hindsight we don't think users + should be expected to secure their services if we have the ability to + be secure by default. + + By disabling any content type that the user did not explicitly + want enabled we ensure that the user must be conscious when they + add pickle as a serialization format to support. + + The other built-in serializers (yaml and msgpack) are also disabled + even though they aren't considered insecure [#f1]_ at this point. + Instead they're disabled so that if a security flaw is found in one of these + libraries in the future, you will only be affected if you have + explicitly enabled them. + + To have your consumer accept formats other than json you have to + explicitly add the wanted formats to a white-list of accepted + content types:: + + >>> c = Consumer(conn, accept=['json', 'pickle', 'msgpack']) + + or when using synchronous access:: + + >>> msg = queue.get(accept=['json', 'pickle', 'msgpack']) + + The ``accept`` argument was first supported for consumers in version + 2.5.10, and first supported by ``Queue.get`` in version 2.5.15 + so to stay compatible with previous versions you can enable + the previous behavior: + + >>> from kombu import enable_insecure_serializers + >>> enable_insecure_serializers() + + But note that this has global effect, so be very careful should you use it. + + .. rubric:: Footnotes + + .. [#f1] The PyYAML library has a :func:`yaml.load` function with some of the + same security implications as pickle, but Kombu uses the + :func:`yaml.safe_load` function which is not known to be affected. + +- kombu.async: Experimental event loop implementation. + + This code was previously in Celery but was moved here + to make it easier for async transport implementations. + + The API is meant to match the Tulip API which will be included + in Python 3.4 as the ``asyncio`` module. It's not a complete + implementation obviously, but the goal is that it will be easy + to change to it once that is possible. + +- Utility function ``kombu.common.ipublish`` has been removed. + + Use ``Producer(..., retry=True)`` instead. + +- Utility function ``kombu.common.isend_reply`` has been removed + + Use ``send_reply(..., retry=True)`` instead. + +- ``kombu.common.entry_to_queue`` and ``kombu.messaging.entry_to_queue`` + has been removed. + + Use ``Queue.from_dict(name, **options)`` instead. + +- Redis: Messages are now restored at the end of the list. + + Contributed by Mark Lavin. + +- ``StdConnectionError`` and ``StdChannelError`` is removed + and :exc:`amqp.ConnectionError` and :exc:`amqp.ChannelError` is used + instead. + +- Message object implementation has moved to :class:`kombu.message.Message`. + +- Serailization: Renamed functions encode/decode to + :func:`~kombu.serialization.dumps` and :func:`~kombu.serialization.loads`. + + For backward compatibility the old names are still available as aliases. + +- The ``kombu.log.anon_logger`` function has been removed. + + Use :func:`~kombu.log.get_logger` instead. 
+ +- ``queue_declare`` now returns namedtuple with ``queue``, ``message_count``, + and ``consumer_count`` fields. + +- LamportClock: Can now set lock class + +- :mod:`kombu.utils.clock`: Utilities for ordering events added. + +- :class:`~kombu.simple.SimpleQueue` now allows you to override + the exchange type used. + + Contributed by Vince Gonzales. + +- Zookeeper transport updated to support new changes in the :mod:`kazoo` + library. + + Contributed by Mahendra M. + +- pyamqp/librabbitmq: Transport options are now forwarded as keyword arguments + to the underlying connection (Issue #214). + +- Transports may now distinguish between recoverable and irrecoverable + connection and channel errors. + +- ``kombu.utils.Finalize`` has been removed: Use + :mod:`multiprocessing.util.Finalize` instead. + +- Memory transport now supports the fanout exchange type. + + Contributed by Davanum Srinivas. + +- Experimental new `Pyro`_ transport (:mod:`kombu.transport.pyro`). + + Contributed by Tommie McAfee. + +.. _`Pyro`: http://pythonhosted.org/Pyro + +- Experimental new `SoftLayer MQ`_ transport (:mod:`kombu.transport.SLMQ`). + + Contributed by Kevin McDonald + +.. _`SoftLayer MQ`: http://www.softlayer.com/services/additional/message-queue + +- Eventio: Kqueue breaks in subtle ways so select is now used instead. + +- SQLAlchemy transport: Can now specify table names using the + ``queue_tablename`` and ``message_tablename`` transport options. + + Contributed by Ryan Petrello. + +Redis transport: Now supports using local UNIX sockets to communicate with the + Redis server (Issue #1283) + + To connect using a UNIX socket you have to use the ``redis+socket`` + URL-prefix: ``redis+socket:///tmp/redis.sock``. + + This functionality was merged from the `celery-redis-unixsocket`_ project. + Contributed by Maxime Rouyrre. + +ZeroMQ transport: drain_events now supports timeout. + + Contributed by Jesper Thomschütz. + +.. _`celery-redis-unixsocket`: + https://github.com/piquadrat/celery-redis-unixsocket + +.. _version-2.5.16: + +2.5.16 +====== +:release-date: 2013-10-04 03:30 P.M BST +:release-by: Ask Solem + +- Python3: Fixed problem with dependencies not being installed. + +.. _version-2.5.15: + +2.5.15 +====== +:release-date: 2013-10-04 03:30 P.M BST +:release-by: Ask Solem + +- Declaration cache: Now only keeps hash of declaration + so that it does not keep a reference to the channel. + +- Declaration cache: Now respects ``entity.can_cache_declaration`` + attribute. + +- Fixes Python 2.5 compatibility. + +- Fixes tests after python-msgpack changes. + +- ``Queue.get``: Now supports ``accept`` argument. + +.. _version-2.5.14: + +2.5.14 +====== +:release-date: 2013-08-23 05:00 P.M BST +:release-by: Ask Solem + +- safe_str did not work properly resulting in + :exc:`UnicodeDecodeError` (Issue #248). + +.. _version-2.5.13: + +2.5.13 +====== +:release-date: 2013-08-16 04:00 P.M BST +:release-by: Ask Solem + +- Now depends on :mod:`amqp` 1.0.13 + +- Fixed typo in Django functional tests. + +- safe_str now returns Unicode in Python 2.x + + Fix contributed by Germán M. Bravo. + +- amqp: Transport options are now merged with arguments + supplied to the connection. + +- Tests no longer depends on distribute, which was deprecated + and merged back into setuptools. + + Fix contributed by Sascha Peilicke. + +- ConsumerMixin now also restarts on channel related errors. + + Fix contributed by Corentin Ardeois. + +.. 
_version-2.5.12: + +2.5.12 +====== +:release-date: 2013-06-28 03:30 P.M BST +:release-by: Ask Solem + +- Redis: Ignore errors about keys missing in the round-robin cycle. + +- Fixed test suite errors on Python 3. + +- Fixed msgpack test failures. + +.. _version-2.5.11: + +2.5.11 +====== +:release-date: 2013-06-25 02:30 P.M BST +:release-by: Ask Solem + +- Now depends on amqp 1.0.12 (Py3 compatibility issues). + +- MongoDB: Removed cause of a "database name in URI is being ignored" + warning. + + Fix by Flavio Percoco Premoli + +- Adds ``passive`` option to :class:`~kombu.Exchange`. + + Setting this flag means that the exchange will not be declared by kombu, + but that it must exist already (or an exception will be raised). + + Contributed by Rafal Malinowski + +- Connection.info() now gives the current hostname and not the list of + available hostnames. + + Fix contributed by John Shuping. + +- pyamqp: Transport options are now forwarded as kwargs to ``amqp.Connection``. + +- librabbitmq: Transport options are now forwarded as kwargs to + ``librabbitmq.Connection``. + +- librabbitmq: Now raises :exc:`NotImplementedError` if SSL is enabled. + + The librabbitmq library does not support ssl, + but you can use stunnel or change to the ``pyamqp://`` transport + instead. + + Fix contributed by Dan LaMotte. + +- librabbitmq: Fixed a cyclic reference at connection close. + +- eventio: select implementation now removes bad file descriptors. + +- eventio: Fixed Py3 compatibility problems. + +- Functional tests added for py-amqp and librabbitmq transports. + +- Resource.force_close_all no longer uses a mutex. + +- Pidbox: Now ignores `IconsistencyError` when sending replies, + as this error simply means that the client may no longer be alive. + +- Adds new :meth:`Connection.collect <~kombu.Connection.collect>` method, + that can be used to clean up after connections without I/O. + +- ``queue_bind`` is no longer called for queues bound to + the "default exchange" (Issue #209). + + Contributed by Jonathan Halcrow. + +- The max_retries setting for retries was not respected correctly (off by one). + +.. _version-2.5.10: + +2.5.10 +====== +:release-date: 2013-04-11 06:10 P.M BST +:release-by: Ask Solem + +Note about upcoming changes for Kombu 3.0 +----------------------------------------- + +Kombu 3 consumers will no longer accept pickle/yaml or msgpack +by default, and you will have to explicitly enable untrusted deserializers +either globally using :func:`kombu.enable_insecure_serializers`, or +using the ``accept`` argument to :class:`~kombu.Consumer`. + +Changes +------- + +- New utility function to disable/enable untrusted serializers. + + - :func:`kombu.disable_insecure_serializers` + - :func:`kombu.enable_insecure_serializers`. + +- Consumer: ``accept`` can now be used to specify a whitelist + of content types to accept. + + If the accept whitelist is set and a message is received + with a content type that is not in the whitelist then a + :exc:`~kombu.exceptions.ContentDisallowed` exception + is raised. Note that this error can be handled by the already + existing `on_decode_error` callback + + Examples:: + + Consumer(accept=['application/json']) + Consumer(accept=['pickle', 'json']) + +- Now depends on amqp 1.0.11 + +- pidbox: Mailbox now supports the ``accept`` argument. + +- Redis: More friendly error for when keys are missing. + +- Connection URLs: The parser did not work well when there were + multiple '+' tokens. + +.. 
_version-2.5.9: + +2.5.9 +===== +:release-date: 2013-04-08 05:07 P.M BST +:release-by: Ask Solem + +- Pidbox: Now warns if there are multiple nodes consuming from + the same pidbox. + +- Adds :attr:`Queue.on_declared ` + + A callback to be called when the queue is declared, + with signature ``(name, messages, consumers)``. + +- Now uses fuzzy matching to suggest alternatives to typos in transport + names. + +- SQS: Adds new transport option ``queue_prefix``. + + Contributed by j0hnsmith. + +- pyamqp: No longer overrides verify_connection. + +- SQS: Now specifies the ``driver_type`` and ``driver_name`` + attributes. + + Fix contributed by Mher Movsisyan. + +- Fixed bug with ``kombu.utils.retry_over_time`` when no errback + specified. + + +.. _version-2.5.8: + +2.5.8 +===== +:release-date: 2013-03-21 04:00 P.M UTC +:release-by: Ask Solem + +- Now depends on :mod:`amqp` 1.0.10 which fixes a Python 3 compatibility error. + +- Redis: Fixed a possible race condition (Issue #171). + +- Redis: Ack emulation/visibility_timeout can now be disabled + using a transport option. + + Ack emulation adds quite a lot of overhead to ensure data is safe + even in the event of an unclean shutdown. If data loss do not worry + you there is now an `ack_emulation` transport option you can use + to disable it:: + + Connection('redis://', transport_options={'ack_emulation': False}) + +- SQS: Fixed :mod:`boto` v2.7 compatibility (Issue #207). + +- Exchange: Should not try to re-declare default exchange (``""``) + (Issue #209). + +- SQS: Long polling is now disabled by default as it was not + implemented correctly, resulting in long delays between receiving + messages (Issue #202). + +- Fixed Python 2.6 incompatibility depending on ``exc.errno`` + being available. + + Fix contributed by Ephemera. + +.. _version-2.5.7: + +2.5.7 +===== +:release-date: 2013-03-08 01:00 P.M UTC +:release-by: Ask Solem + +- Now depends on amqp 1.0.9 + +- Redis: A regression in 2.5.6 caused the redis transport to + ignore options set in ``transport_options``. + +- Redis: New ``socket_timeout`` transport option. + +- Redis: ``InconsistencyError`` is now regarded as a recoverable error. + +- Resource pools: Will no longer attempt to release resource + that was never acquired. + +- MongoDB: Now supports the ``ssl`` option. + + Contributed by Sebastian Pawlus. + +.. _version-2.5.6: + +2.5.6 +===== +:release-date: 2013-02-08 01:00 P.M UTC +:release-by: Ask Solem + +- Now depends on amqp 1.0.8 which works around a bug found on some + Python 2.5 installations where 2**32 overflows to 0. + +.. _version-2.5.5: + +2.5.5 +===== +:release-date: 2013-02-07 05:00 P.M UTC +:release-by: Ask Solem + +SQS: Now supports long polling (Issue #176). + + The polling interval default has been changed to 0 and a new + transport option (``wait_time_seconds``) has been added. + This parameter specifies how long to wait for a message from + SQS, and defaults to 20 seconds, which is the maximum + value currently allowed by Amazon SQS. + + Contributed by James Saryerwinnie. + +- SQS: Now removes unpickleable fields before restoring messages. + +- Consumer.__exit__ now ignores exceptions occurring while + cancelling the consumer. + +- Virtual: Routing keys can now consist of characters also used + in regular expressions (e.g. parens) (Issue #194). + +- Virtual: Fixed compression header when restoring messages. + + Fix contributed by Alex Koshelev. + +- Virtual: ack/reject/requeue now works while using ``basic_get``. 
+
+- Virtual: Message.reject is now supported by virtual transports
+  (requeue depends on individual transport support).
+
+- Fixed typo in hack used for static analyzers.
+
+    Fix contributed by Basil Mironenko.
+
+.. _version-2.5.4:
+
+2.5.4
+=====
+:release-date: 2012-12-10 12:35 P.M UTC
+:release-by: Ask Solem
+
+- Fixed problem with connection clone and multiple URLs (Issue #182).
+
+    Fix contributed by Dane Guempel.
+
+- zeromq: Now compatible with libzmq 3.2.x.
+
+    Fix contributed by Andrey Antukh.
+
+- Fixed Python 3 installation problem (Issue #187).
+
+.. _version-2.5.3:
+
+2.5.3
+=====
+:release-date: 2012-11-29 12:35 P.M UTC
+:release-by: Ask Solem
+
+- Pidbox: Fixed compatibility with Python 2.6.
+
+.. _version-2.5.2:
+
+2.5.2
+=====
+:release-date: 2012-11-29 12:35 P.M UTC
+:release-by: Ask Solem
+
+- [Redis] Fixed connection leak and added a new 'max_connections' transport
+  option.
+
+.. _version-2.5.1:
+
+2.5.1
+=====
+:release-date: 2012-11-28 12:45 P.M UTC
+:release-by: Ask Solem
+
+- Fixed bug where the return value of Queue.as_dict could not be serialized
+  with JSON (Issue #177).
+
+.. _version-2.5.0:
+
+2.5.0
+=====
+:release-date: 2012-11-27 04:00 P.M UTC
+:release-by: Ask Solem
+
+- `py-amqp`_ is now the new default transport, replacing ``amqplib``.
+
+    The new `py-amqp`_ library is a fork of amqplib started with the
+    following goals:
+
+    - Uses AMQP 0.9.1 instead of 0.8.
+    - Support for heartbeats (Issue #79 + Issue #131).
+    - Automatically revives channels on channel errors.
+    - Support for all RabbitMQ extensions:
+
+        - Consumer Cancel Notifications (Issue #131)
+        - Publisher Confirms (Issue #131)
+        - Exchange-to-exchange bindings: ``exchange_bind`` / ``exchange_unbind``
+
+    - API compatible with :mod:`librabbitmq` so that it can be used
+      as a pure-Python replacement in environments where rabbitmq-c cannot
+      be compiled. librabbitmq will be updated to support all the same
+      features as py-amqp.
+
+- Support for using multiple connection URLs for failover.
+
+    The first argument to :class:`~kombu.Connection` can now be a list of
+    connection URLs:
+
+    .. code-block:: python
+
+        Connection(['amqp://foo', 'amqp://bar'])
+
+    or it can be a single string argument with several URLs separated by
+    semicolon:
+
+    .. code-block:: python
+
+        Connection('amqp://foo;amqp://bar')
+
+    There is also a new keyword argument ``failover_strategy`` that defines
+    how :meth:`~kombu.Connection.ensure_connection`/
+    :meth:`~kombu.Connection.ensure`/:meth:`~kombu.Connection.autoretry` will
+    reconnect in the event of connection failures.
+
+    The default reconnection strategy is ``round-robin``, which will simply
+    cycle through the list forever, and there's also a ``shuffle`` strategy
+    that will select random hosts from the list. Custom strategies can also
+    be used; in that case the argument must be a generator yielding the URL
+    to connect to.
+
+    Example:
+
+    .. code-block:: python
+
+        Connection('amqp://foo;amqp://bar', failover_strategy='shuffle')
+
+- Now supports PyDev, PyCharm, pylint and other static code analysis tools.
+
+- :class:`~kombu.Queue` now supports multiple bindings.
+
+    You can now have multiple bindings in the same queue by having
+    the second argument be a list:
+
+    .. code-block:: python
+
+        from kombu import binding, Exchange, Queue
+
+        Queue('name', [
+            binding(Exchange('E1'), routing_key='foo'),
+            binding(Exchange('E1'), routing_key='bar'),
+            binding(Exchange('E2'), routing_key='baz'),
+        ])
+
+    To enable this, helper methods have been added:
+
+    - :meth:`~kombu.Queue.bind_to`
+    - :meth:`~kombu.Queue.unbind_from`
+
+    Contributed by Rumyana Neykova.
+
+- Custom serializers can now be registered using Setuptools entry-points.
+
+    See :ref:`serialization-entrypoints`.
+
+- New :class:`kombu.common.QoS` class used as a thread-safe way to manage
+  changes to a consumer's or channel's prefetch_count.
+
+    This was previously an internal class used in Celery, now moved to
+    the :mod:`kombu.common` module.
+
+- Consumer now supports an ``on_message`` callback that can be used to
+  process raw messages (not decoded).
+
+    Other callbacks specified using the ``callbacks`` argument, and
+    the ``receive`` method, will not be called when an ``on_message``
+    callback is present.
+
+- New utility :func:`kombu.common.ignore_errors` ignores connection and
+  channel errors.
+
+    Must only be used for cleanup actions at shutdown or on connection loss.
+
+- Support for exchange-to-exchange bindings.
+
+    The :class:`~kombu.Exchange` entity gained ``bind_to``
+    and ``unbind_from`` methods:
+
+    .. code-block:: python
+
+        e1 = Exchange('A')(connection)
+        e2 = Exchange('B')(connection)
+
+        e2.bind_to(e1, routing_key='rkey', arguments=None)
+        e2.unbind_from(e1, routing_key='rkey', arguments=None)
+
+    This is currently only supported by the ``pyamqp`` transport.
+
+    Contributed by Rumyana Neykova.
+
+.. _version-2.4.10:
+
+2.4.10
+======
+:release-date: 2012-11-22 06:00 P.M UTC
+:release-by: Ask Solem
+
+- The previous version's connection pool changes broke Redis support, so that
+  it would always connect to localhost (the default setting) no matter what
+  connection parameters were provided (Issue #176).
+
+.. _version-2.4.9:
+
+2.4.9
+=====
+:release-date: 2012-11-21 03:00 P.M UTC
+:release-by: Ask Solem
+
+- Redis: Fixed race condition that could occur while trying to restore
+  messages (Issue #171).
+
+    Fix contributed by Ollie Walsh.
+
+- Redis: Each channel is now using a specific connection pool instance,
+  which is disconnected on connection failure.
+
+- ProducerPool: Fixed possible deadlock in the acquire method.
+
+- ProducerPool: ``force_close_all`` no longer tries to call the non-existent
+  ``Producer._close``.
+
+- librabbitmq: Now implements ``transport.verify_connection`` so that
+  connection pools will not give back connections that are no longer working.
+
+- New and better ``repr()`` for Queue and Exchange objects.
+
+- Python 3: Fixed problem with running the unit test suite.
+
+- Python 3: Fixed problem with the JSON codec.
+
+.. _version-2.4.8:
+
+2.4.8
+=====
+:release-date: 2012-11-02 05:00 P.M UTC
+:release-by: Ask Solem
+
+- Redis: Improved fair queue cycle implementation (Issue #166).
+
+    Contributed by Kevin McCarthy.
+
+- Redis: The unacked message restore limit is now unlimited by default.
+
+    Also, the limit can now be configured using the ``unacked_restore_limit``
+    transport option:
+
+    .. code-block:: python
+
+        Connection('redis://', transport_options={
+            'unacked_restore_limit': 100,
+        })
+
+    A limit of 100 means that the consumer will restore at most 100
+    messages at each pass.
+
+- Redis: Now uses a mutex to ensure only one consumer restores messages at a
+  time.
+
+    The mutex expires after 5 minutes by default, but can be configured
+    using the ``unacked_mutex_expire`` transport option.
+
+- LamportClock.adjust now returns the new clock value.
+
+- Heartbeats can now be specified in URLs.
+
+    Fix contributed by Mher Movsisyan.
+
+- Kombu can now be used with PyDev, PyCharm and other static analysis tools.
+
+- Fixes problem with msgpack on Python 3 (Issue #162).
+
+    Fix contributed by Jasper Bryant-Greene.
+
+- amqplib: Fixed bug with timeouts when SSL is used in non-blocking mode.
+
+    Fix contributed by Mher Movsisyan.
+
+
+.. _version-2.4.7:
+
+2.4.7
+=====
+:release-date: 2012-09-18 03:00 P.M BST
+:release-by: Ask Solem
+
+- Virtual: Unknown exchanges now default to 'direct' when sending a message.
+
+- MongoDB: Fixed memory leak when merging keys stored in the db (Issue #159).
+
+    Fix contributed by Michael Korbakov.
+
+- MongoDB: Better index for the MongoDB transport (Issue #158).
+
+    This improvement will create a new compound index for queue and _id, in
+    order to be able to use both indexed fields for getting a new message
+    (using the queue field) and sorting by _id. It'll be necessary to
+    manually delete the old index from the collection.
+
+    Improvement contributed by rmihael.
+
+.. _version-2.4.6:
+
+2.4.6
+=====
+:release-date: 2012-09-12 03:00 P.M BST
+:release-by: Ask Solem
+
+- Adds additional compatibility dependencies:
+
+    - Python <= 2.6:
+
+        - importlib
+        - ordereddict
+
+    - Python <= 2.5:
+
+        - simplejson
+
+.. _version-2.4.5:
+
+2.4.5
+=====
+:release-date: 2012-08-30 03:36 P.M BST
+:release-by: Ask Solem
+
+- The last version broke installation on PyPy and Jython due
+  to test requirements clean-up.
+
+.. _version-2.4.4:
+
+2.4.4
+=====
+:release-date: 2012-08-29 04:00 P.M BST
+:release-by: Ask Solem
+
+- amqplib: Fixed a bug with asynchronously reading large messages.
+
+- pyamqp: Now requires amqp 0.9.3.
+
+- Cleaned up test requirements.
+
+.. _version-2.4.3:
+
+2.4.3
+=====
+:release-date: 2012-08-25 10:30 P.M BST
+:release-by: Ask Solem
+
+- Fixed problem with the amqp transport alias (Issue #154).
+
+.. _version-2.4.2:
+
+2.4.2
+=====
+:release-date: 2012-08-24 05:00 P.M BST
+:release-by: Ask Solem
+
+- Having an empty transport name broke in 2.4.1.
+
+
+.. _version-2.4.1:
+
+2.4.1
+=====
+:release-date: 2012-08-24 04:00 P.M BST
+:release-by: Ask Solem
+
+- Redis: Fixed race condition that could cause the consumer to crash
+  (Issue #151).
+
+    Often leading to the error message ``"could not convert string
+    to float"``.
+
+- Connection retry could cause an infinite loop (Issue #145).
+
+- The ``amqp`` alias is now resolved at runtime, so that eventlet detection
+  works even if patching was done later.
+
+.. _version-2.4.0:
+
+2.4.0
+=====
+:release-date: 2012-08-17 08:00 P.M BST
+:release-by: Ask Solem
+
+- New experimental :mod:`ZeroMQ <kombu.transport.zmq>` transport.
+
+- New ``pyamqp://`` transport::
+
+    >>> conn = Connection('pyamqp://guest:guest@localhost//')
+
+    The ``pyamqp://`` transport will be the default fallback transport
+    in Kombu version 3.0, when :mod:`librabbitmq` is not installed,
+    and librabbitmq will also be updated to support the same features.
+
+- Connection now supports a ``heartbeat`` argument.
+
+    If enabled you must make sure to manually maintain heartbeats
+    by calling ``Connection.heartbeat_check`` at twice the rate
+    of the specified heartbeat interval.
+
+    E.g. if you have ``Connection(heartbeat=10)``,
+    then you must call ``Connection.heartbeat_check()`` every 5 seconds.
+
+    If the server has not sent heartbeats at a suitable rate then
+    the heartbeat check method must raise an error that is listed
+    in ``Connection.connection_errors``.
+
+    The attribute ``Connection.supports_heartbeats`` has been added
+    so that you can inspect whether a transport supports heartbeats
+    or not.
+
+    Calling ``heartbeat_check`` on a transport that does
+    not support heartbeats is a no-op.
+
+- SQS: Fixed bug with invalid characters in queue names.
+
+    Fix contributed by Zach Smith.
+
+- utils.reprcall: Fixed typo where the kwargs argument was an empty tuple by
+  default, and not an empty dict.
+
+.. _version-2.2.6:
+
+2.2.6
+=====
+:release-date: 2012-07-10 05:00 P.M BST
+:release-by: Ask Solem
+
+- Adds ``kombu.messaging.entry_to_queue`` for compat with previous versions.
+
+.. _version-2.2.5:
+
+2.2.5
+=====
+:release-date: 2012-07-10 05:00 P.M BST
+:release-by: Ask Solem
+
+- Pidbox: Now sets queue expiry at 10 seconds for reply queues.
+
+- EventIO: Now ignores ``ValueError`` raised by epoll unregister.
+
+- MongoDB: Fixes Issue #142.
+
+    Fix by Flavio Percoco Premoli.
+
+.. _version-2.2.4:
+
+2.2.4
+=====
+:release-date: 2012-07-05 04:00 P.M BST
+:release-by: Ask Solem
+
+- Support for msgpack-python 0.2.0 (Issue #143).
+
+    The latest msgpack version no longer supports Python 2.5, so if you're
+    still using that you need to depend on an earlier msgpack-python version.
+
+    Fix contributed by Sebastian Insua.
+
+- :func:`~kombu.common.maybe_declare` no longer caches entities with the
+  ``auto_delete`` flag set.
+
+- New experimental filesystem transport.
+
+    Contributed by Bobby Beever.
+
+- Virtual Transports: Now support anonymous queues and exchanges.
+
+.. _version-2.2.3:
+
+2.2.3
+=====
+:release-date: 2012-06-24 05:00 P.M BST
+:release-by: Ask Solem
+
+- ``BrokerConnection`` is now renamed to ``Connection``.
+
+    The name ``Connection`` has been an alias for a very long time,
+    but now the rename is official in the documentation as well.
+
+    The Connection alias has been available since version 1.1.3,
+    and ``BrokerConnection`` will still work and is not deprecated.
+
+- ``Connection.clone()`` now works for the sqlalchemy transport.
+
+- :func:`kombu.common.eventloop`, :func:`kombu.utils.uuid`,
+  and :func:`kombu.utils.url.parse_url` can now be
+  imported from the :mod:`kombu` module directly.
+
+- The pidbox transport callback ``after_reply_message_received`` now happens
+  in a finally block.
+
+- Trying to use the ``librabbitmq://`` transport will now show the right
+  name in the :exc:`ImportError` if :mod:`librabbitmq` is not installed.
+
+    The librabbitmq transport falls back to the older ``pylibrabbitmq``
+    name for compatibility reasons, and would therefore show ``No module
+    named pylibrabbitmq`` instead of librabbitmq.
+
+
+.. _version-2.2.2:
+
+2.2.2
+=====
+:release-date: 2012-06-22 02:30 P.M BST
+:release-by: Ask Solem
+
+- Now depends on :mod:`anyjson` 0.3.3.
+
+- Json serializer: Now passes :class:`buffer` objects directly,
+  since this is supported in the latest :mod:`anyjson` version.
+
+- Fixes blocking epoll call if the timeout was set to 0.
+
+    Fix contributed by John Watson.
+
+- setup.py now takes requirements from the :file:`requirements/` directory.
+
+- The distribution directory :file:`contrib/` is now renamed to
+  :file:`extra/`.
+
+.. _version-2.2.1:
+
+2.2.1
+=====
+:release-date: 2012-06-21 01:00 P.M BST
+:release-by: Ask Solem
+
+- SQS: Default visibility timeout is now 30 minutes.
+
+    Since we have ack emulation, the visibility timeout is
+    only in effect if the consumer is abruptly terminated.
+
+- The retry argument to ``Producer.publish`` now works properly
+  when the declare argument is specified.
+
+- Json serializer: didn't handle buffer objects (Issue #135).
+
+    Fix contributed by Jens Hoffrichter.
+
+- Virtual: Now supports the passive argument to ``exchange_declare``.
+
+- Exchange & Queue can now be bound to connections (which will use the
+  default channel)::
+
+    >>> exchange = Exchange('name')
+    >>> bound_exchange = exchange(connection)
+    >>> bound_exchange.declare()
+
+- ``SimpleQueue`` & ``SimpleBuffer`` can now be bound to connections (which
+  will use the default channel).
+
+- ``Connection.manager.get_bindings`` now works for librabbitmq and pika.
+
+- Adds new transport info attributes:
+
+    - ``Transport.driver_type``
+
+        Type of underlying driver, e.g. "amqp", "redis", "sql".
+
+    - ``Transport.driver_name``
+
+        Name of the library used, e.g. "amqplib", "redis", "pymongo".
+
+    - ``Transport.driver_version()``
+
+        Version of the underlying library.
+
+.. _version-2.2.0:
+
+2.2.0
+=====
+:release-date: 2012-06-07 03:10 P.M BST
+:release-by: Ask Solem
+
+.. _v220-important:
+
+Important Notes
+---------------
+
+- The canonical source code repository has been moved to
+
+    http://github.com/celery/kombu
+
+- Pidbox: Exchanges used by pidbox are no longer auto_delete.
+
+    Auto delete has been described as a misfeature,
+    and therefore we have disabled it.
+
+    For RabbitMQ users, old exchanges used by pidbox must be removed;
+    these are named ``mailbox_name.pidbox``
+    and ``reply.mailbox_name.pidbox``.
+
+    The following command can be used to clean up these exchanges::
+
+        VHOST=/ URL=amqp:// python -c'import sys,kombu;[kombu.Connection(
+            sys.argv[-1]).channel().exchange_delete(x)
+                for x in sys.argv[1:-1]]' \
+            $(sudo rabbitmqctl -q list_exchanges -p "$VHOST" \
+            | grep \.pidbox | awk '{print $1}') "$URL"
+
+    The :envvar:`VHOST` variable must be set to the target RabbitMQ virtual
+    host, and the :envvar:`URL` must be the AMQP URL to the server.
+
+- The ``amqp`` transport alias will now use :mod:`librabbitmq`
+  if installed.
+
+    `py-librabbitmq`_ is a fast AMQP client for Python
+    using the librabbitmq C library.
+
+    It can be installed by::
+
+        $ pip install librabbitmq
+
+    It will not be used if the process is monkey patched by eventlet/gevent.
+
+.. _`py-librabbitmq`: https://github.com/celery/librabbitmq
+
+.. _v220-news:
+
+News
+----
+
+- Redis: Ack emulation improvements.
+
+    Reducing the possibility of data loss.
+
+    Acks are now implemented by storing a copy of the message when the
+    message is consumed. The copy is not removed until the consumer
+    acknowledges or rejects it.
+
+    This means that unacknowledged messages will be redelivered either
+    when the connection is closed, or when the visibility timeout is
+    exceeded.
+
+    - Visibility timeout
+
+        This is a timeout for acks, so that if the consumer
+        does not ack the message within this time limit, the message
+        is redelivered to another consumer.
+
+        The timeout is set to one hour by default, but
+        can be changed by configuring a transport option:
+
+            >>> Connection('redis://', transport_options={
+            ...     'visibility_timeout': 1800,  # 30 minutes
+            ... })
+
+    **NOTE**: Messages that have not been acked will be redelivered
+    if the visibility timeout is exceeded. For Celery users
+    this means that ETA/countdown tasks that are scheduled to execute
+    with a time that exceeds the visibility timeout will be executed
+    twice (or more). If you plan on using long ETA/countdowns you
+    should tweak the visibility timeout accordingly::
+
+        BROKER_TRANSPORT_OPTIONS = {'visibility_timeout': 18000}  # 5 hours
+
+    Setting a long timeout means that it will take a long time
+    for messages to be redelivered in the event of a power failure,
+    but if that happens you could temporarily set the visibility timeout
+    lower to flush out messages when you start up the systems again.
+
+- Experimental `Apache ZooKeeper`_ transport.
+
+    More information is in the module reference:
+    :mod:`kombu.transport.zookeeper`.
+
+    Contributed by Mahendra M.
+
+.. _`Apache ZooKeeper`: http://zookeeper.apache.org/
+
+- Redis: Priority support.
+
+    The message's ``priority`` field is now respected by the Redis
+    transport by having multiple lists for each named queue.
+    The queues are then consumed in order of priority.
+
+    The priority field is a number in the range of 0 - 9, where
+    0 is the default and highest priority.
+
+    The priority range is collapsed into four steps by default, since it is
+    unlikely that nine steps will yield more benefit than using four steps.
+    The number of steps can be configured by setting the ``priority_steps``
+    transport option, which must be a list of numbers in **sorted order**::
+
+        >>> x = Connection('redis://', transport_options={
+        ...     'priority_steps': [0, 2, 4, 6, 8, 9],
+        ... })
+
+    Priorities implemented in this way are not as reliable as
+    priorities on the server side, which is why we nickname
+    the feature "quasi-priorities";
+    **using routing is still the suggested way of ensuring
+    quality of service**, as client-implemented priorities
+    fall short in a number of ways, e.g. if the worker
+    is busy with long-running tasks, has prefetched many messages,
+    or the queues are congested.
+
+    Still, it is possible that using priorities in combination
+    with routing can be more beneficial than using routing
+    or priorities alone. Experimentation and monitoring
+    should be used to prove this.
+
+    Contributed by Germán M. Bravo.
+
+- Redis: Now cycles queues so that consuming is fair.
+
+    This ensures that a very busy queue won't block messages
+    from other queues, and ensures that all queues have
+    an equal chance of being consumed from.
+
+    This used to be the case before, but the behavior was
+    accidentally changed while switching to using blocking pop.
+
+- Redis: Auto-delete queues bound to fanout exchanges are now deleted
+  at channel.close.
+
+- amqplib: Refactored the drain_events implementation.
+
+- Pidbox: Now uses ``connection.default_channel``.
+
+- Pickle serialization: Can now decode buffer objects.
+
+- Exchange/Queue declarations can now be cached even if
+  the entity is non-durable.
+
+    This is possible because the list of cached declarations
+    is now kept with the connection, so that the entities
+    will be redeclared if the connection is lost.
+
+- Kombu source code now only uses one level of explicit relative imports.
+
+.. _v220-fixes:
+
+Fixes
+-----
+
+- eventio: Now ignores ENOENT raised by ``epoll.register``, and
+  EEXIST from ``epoll.unregister``.
+
+- eventio: kqueue now ignores :exc:`KeyError` on unregister.
+
+- Redis: ``Message.reject`` now supports the ``requeue`` argument.
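+
+  For illustration, a minimal sketch in a consumer callback (``process``
+  and ``TemporaryError`` are hypothetical)::
+
+      def callback(body, message):
+          try:
+              process(body)
+          except TemporaryError:
+              message.reject(requeue=True)  # hand the message back to Redis
+          else:
+              message.ack()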
+
+- Redis: Removed a superfluous pipeline call.
+
+    Fix contributed by Thomas Johansson.
+
+- Redis: Now sets the redelivered header for redelivered messages.
+
+- Now always makes sure references to :func:`sys.exc_info` are removed.
+
+- Virtual: The compression header is now removed before restoring messages.
+
+- More tests for the SQLAlchemy backend.
+
+    Contributed by Franck Cuny.
+
+- URL parsing did not handle MongoDB URLs properly.
+
+    Fix contributed by Flavio Percoco Premoli.
+
+- Beanstalk: Ignore default tube when reserving.
+
+    Fix contributed by Zhao Xiaohong.
+
+Nonblocking consume support
+---------------------------
+
+The librabbitmq, amqplib and redis transports can now be used
+in non-blocking mode.
+
+The interface is very manual, and only consuming messages
+is non-blocking so far.
+
+The API should not be regarded as stable or final
+in any way. It is used by Celery, which has very limited
+needs at this point. Hopefully we can introduce a proper
+callback-based API later.
+
+- ``Transport.eventmap``
+
+    A map of ``fd -> callback(fileno, event)``
+    to register in an eventloop.
+
+- ``Transport.on_poll_start()``
+
+    Called before every call to poll.
+    The poller must support ``register(fd, callback)``
+    and ``unregister(fd)`` methods.
+
+- ``Transport.on_poll_init(poller)``
+
+    Called when the hub is initialized.
+    The poller argument must support the same
+    interface as :class:`kombu.utils.eventio.poll`.
+
+- ``Connection.ensure_connection`` now takes a callback
+  argument which is called for every loop while
+  the connection is down.
+
+- Adds ``connection.drain_nowait``.
+
+    This is a non-blocking alternative to drain_events,
+    but only supported by amqplib/librabbitmq.
+
+- drain_events now sets ``connection.more_to_read`` if
+  there is more data to read.
+
+    This is to support eventloops where other things
+    must be handled between draining events.
+
+.. _version-2.1.8:
+
+2.1.8
+=====
+:release-date: 2012-05-06 03:06 P.M BST
+:release-by: Ask Solem
+
+* Bound Exchange/Queue instances are now pickleable.
+
+* Consumer/Producer can now be instantiated without a channel,
+  and only later bound using ``.revive(channel)``.
+
+* ProducerPool now takes a ``Producer`` argument.
+
+* :func:`~kombu.utils.fxrange` now counts forever if the
+  stop argument is set to None
+  (fxrange is like xrange, but for decimals).
+
+* Auto delete support for virtual transports was incomplete
+  and could lead to problems, so it was removed.
+
+* Cached declarations (:func:`~kombu.common.maybe_declare`)
+  are now bound to the underlying connection, so that
+  entities are redeclared if the connection is lost.
+
+    This also means that previously uncacheable entities
+    (e.g. non-durable) can now be cached.
+
+* compat ConsumerSet: can now specify channel.
+
+.. _version-2.1.7:
+
+2.1.7
+=====
+:release-date: 2012-04-27 06:00 P.M BST
+:release-by: Ask Solem
+
+* compat consumerset now accepts an optional channel argument.
+
+.. _version-2.1.6:
+
+2.1.6
+=====
+:release-date: 2012-04-23 01:30 P.M BST
+:release-by: Ask Solem
+
+* The SQLAlchemy transport was not working correctly after the URL parser
+  change.
+
+* maybe_declare now stores cached declarations per underlying connection
+  instead of globally, to handle the rare case where data disappears from
+  the broker after a connection loss.
+
+* Django: Added South migrations.
+
+    Contributed by Joseph Crosland.
+
+.. _version-2.1.5:
+
+2.1.5
+=====
+:release-date: 2012-04-13 03:30 P.M BST
+:release-by: Ask Solem
+
+* The URL parser removed more than the first leading slash (Issue #121).
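+
+  For example (a sketch; the exact result dict depends on the transport's
+  defaults)::
+
+      >>> from kombu.utils.url import parse_url
+      >>> parse_url('amqp://guest:guest@localhost:5672//')['virtual_host']
+      '/'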
+
+* SQLAlchemy: Can now specify the URL using a ``+`` separator.
+
+    Example::
+
+        Connection('sqla+mysql://localhost/db')
+
+* Better support for anonymous queues (Issue #116).
+
+    Contributed by Michael Barrett.
+
+* ``Connection.as_uri`` now quotes URL parts (Issue #117).
+
+* Beanstalk: Can now set the message TTR as a message property.
+
+    Contributed by Andrii Kostenko.
+
+.. _version-2.1.4:
+
+2.1.4
+=====
+:release-date: 2012-04-03 04:00 P.M GMT
+:release-by: Ask Solem
+
+* MongoDB: URL parsing is now delegated to the pymongo library
+  (Fixes Issue #103 and Issue #87).
+
+    Fix contributed by Flavio Percoco Premoli and James Sullivan.
+
+* SQS: A bug caused SimpleDB to be used even if sdb persistence
+  was not enabled (Issue #108).
+
+    Fix contributed by Anand Kumria.
+
+* Django: The transaction was committed in the wrong place, causing
+  data cleanup to fail (Issue #115).
+
+    Fix contributed by Daisuke Fujiwara.
+
+* MongoDB: Now supports replica set URLs.
+
+    Contributed by Flavio Percoco Premoli.
+
+* Redis: Now raises a channel error if a queue key that is currently
+  being consumed from disappears.
+
+    Fix contributed by Stephan Jaekel.
+
+* All transport 'channel_errors' lists now include
+  ``kombu.exceptions.StdChannelError``.
+
+* All kombu exceptions now inherit from a common
+  :exc:`~kombu.exceptions.KombuError`.
+
+.. _version-2.1.3:
+
+2.1.3
+=====
+:release-date: 2012-03-20 03:00 P.M GMT
+:release-by: Ask Solem
+
+* Fixes Jython compatibility issues.
+
+* Fixes Python 2.5 compatibility issues.
+
+.. _version-2.1.2:
+
+2.1.2
+=====
+:release-date: 2012-03-01 01:00 P.M GMT
+:release-by: Ask Solem
+
+* amqplib: The last version broke SSL support.
+
+.. _version-2.1.1:
+
+2.1.1
+=====
+:release-date: 2012-02-24 02:00 P.M GMT
+:release-by: Ask Solem
+
+* Connection URLs now support encoded characters.
+
+* Fixed a case where the connection pool could not recover from a
+  connection loss.
+
+    Fix contributed by Florian Munz.
+
+* We now patch amqplib's ``__del__`` method to skip trying to close the
+  socket if it is not connected, as this resulted in an annoying warning.
+
+* Compression can now be used with binary message payloads.
+
+    Fix contributed by Steeve Morin.
+
+.. _version-2.1.0:
+
+2.1.0
+=====
+:release-date: 2012-02-04 10:38 P.M GMT
+:release-by: Ask Solem
+
+* MongoDB: Now supports fanout (broadcast) (Issue #98).
+
+    Contributed by Scott Lyons.
+
+* amqplib: Now detects broken connections by using ``MSG_PEEK``.
+
+* pylibrabbitmq: Now supports ``basic_get`` (Issue #97).
+
+* gevent: Now always uses the ``select`` polling backend.
+
+* pika transport: Now works with pika 0.9.5 and 0.9.6dev.
+
+    The old pika transport (supporting 0.5.x) is now available
+    as the alias ``oldpika``.
+
+    (Note: terrible latency has been experienced with the new pika
+    versions, so this is still an experimental transport.)
+
+* Virtual transports: Can now set the polling interval via the
+  transport options (Issue #96).
+
+    Example::
+
+        >>> Connection('sqs://', transport_options={
+        ...     'polling_interval': 5.0})
+
+    The default interval is transport specific, but usually
+    1.0s (or 5.0s for the Django database transport, which
+    can also be set using the ``KOMBU_POLLING_INTERVAL`` setting).
+
+* Adds convenience function: :func:`kombu.common.eventloop`.
+
+.. _version-2.0.0:
+
+2.0.0
+=====
+:release-date: 2012-01-15 06:34 P.M GMT
+:release-by: Ask Solem
+
+.. _v200-important:
+
+Important Notes
+---------------
+
+.. _v200-python-compatibility:
+
+Python Compatibility
+~~~~~~~~~~~~~~~~~~~~
+
+* No longer supports Python 2.4.
+
+    Users of Python 2.4 can still use the 1.x series.
+
+    The 1.x series has entered bugfix-only maintenance mode, and will
+    stay that way as long as there is demand, and a willingness to
+    maintain it.
+
+
+.. _v200-new-transports:
+
+New Transports
+~~~~~~~~~~~~~~
+
+* ``django-kombu`` is now part of Kombu core.
+
+    The Django message transport uses the Django ORM to store messages.
+
+    It uses polling, with a default polling interval of 5 seconds.
+    The polling interval can be increased or decreased by configuring the
+    ``KOMBU_POLLING_INTERVAL`` Django setting, which is the polling
+    interval in seconds as an int or a float. Note that shorter polling
+    intervals can cause extreme strain on the database: if responsiveness
+    is needed you should consider switching to a non-polling transport.
+
+    To use it you must use the transport alias ``"django"``,
+    or as a URL::
+
+        django://
+
+    and then add ``kombu.transport.django`` to ``INSTALLED_APPS``, and
+    run ``manage.py syncdb`` to create the necessary database tables.
+
+    **Upgrading**
+
+    If you have previously used ``django-kombu``, then the entry
+    in ``INSTALLED_APPS`` must be changed from ``djkombu``
+    to ``kombu.transport.django``::
+
+        INSTALLED_APPS = (…,
+                          'kombu.transport.django')
+
+    If you have previously used django-kombu, then there is no need
+    to recreate the tables, as the old tables will be fully compatible
+    with the new version.
+
+* ``kombu-sqlalchemy`` is now part of Kombu core.
+
+    This change requires no code changes, given that the
+    ``sqlalchemy`` transport alias is used.
+
+.. _v200-news:
+
+News
+----
+
+* :class:`kombu.mixins.ConsumerMixin` is a mixin class that lets you
+  easily write consumer programs and threads.
+
+    See :ref:`examples` and :ref:`guide-consumers`.
+
+* SQS Transport: Added support for SQS queue prefixes (Issue #84).
+
+    The queue prefix can be set using the transport option
+    ``queue_name_prefix``::
+
+        Connection('SQS://', transport_options={
+            'queue_name_prefix': 'myapp'})
+
+    Contributed by Nitzan Miron.
+
+* ``Producer.publish`` now supports automatic retry.
+
+    Retry is enabled by the ``retry`` argument, and retry options
+    are set by the ``retry_policy`` argument::
+
+        exchange = Exchange('foo')
+        producer.publish(message, exchange=exchange, retry=True,
+                         declare=[exchange], retry_policy={
+                             'interval_start': 1.0})
+
+    See :meth:`~kombu.Connection.ensure`
+    for a list of supported retry policy options.
+
+* ``Producer.publish`` now supports a ``declare`` keyword argument.
+
+    This is a list of entities (:class:`Exchange`, or :class:`Queue`)
+    that should be declared before the message is published.
+
+.. _v200-fixes:
+
+Fixes
+-----
+
+* Redis transport: The timeout was erroneously multiplied by 1000 when
+  using ``select`` for event I/O (Issue #86).
+
+.. _version-1.5.1:
+
+1.5.1
+=====
+:release-date: 2011-11-30 01:00 P.M GMT
+:release-by: Ask Solem
+
+* Fixes issue with ``kombu.compat`` introduced in 1.5.0 (Issue #83).
+
+* Adds the ability to disable content_types in the serializer registry.
+
+    Any message with a content type that is disabled will be refused.
+    One example would be to disable the Pickle serializer::
+
+        >>> from kombu.serialization import registry
+        # by name
+        >>> registry.disable('pickle')
+        # or by mime-type.
+        >>> registry.disable('application/x-python-serialize')
+
+.. _version-1.5.0:
+
+1.5.0
+=====
+:release-date: 2011-11-27 06:00 P.M GMT
+:release-by: Ask Solem
+
+* kombu.pools: Fixed a bug resulting in resources not being properly
+  released.
+
+    This was caused by the use of ``__hash__`` to distinguish them.
+
+* Virtual transports: The dead-letter queue is now disabled by default.
+
+    The dead-letter queue was enabled by default to help application
+    authors, but now that Kombu is stable it should be removed.
+    There are, after all, many cases where messages should just be dropped
+    when there are no queues to buffer them, and keeping them without
+    supporting automatic cleanup is considered a resource leak
+    rather than a feature.
+
+    If wanted, the dead-letter queue can still be enabled using
+    the ``deadletter_queue`` transport option::
+
+        >>> x = Connection('redis://',
+        ...       transport_options={'deadletter_queue': 'ae.undeliver'})
+
+    In addition, an :class:`UndeliverableWarning` is now emitted when
+    the dead-letter queue is enabled and a message ends up there.
+
+    Contributed by Ionel Maries Cristian.
+
+* The MongoDB transport now supports replica sets (Issue #81).
+
+    Contributed by Ivan Metzlar.
+
+* The ``Connection.ensure`` methods now accept a ``max_retries`` value
+  of 0.
+
+    A value of 0 now means *do not retry*, which is distinct from
+    :const:`None`, which means *retry indefinitely*.
+
+    Contributed by Dan McGee.
+
+* SQS Transport: Now has a lowercase ``sqs`` alias, so that it can be
+  used with broker URLs (Issue #82).
+
+    Fix contributed by Hong Minhee.
+
+* SQS Transport: Fixes KeyError on message acknowledgements (Issue #73).
+
+    The SQS transport now uses UUIDs for delivery tags, rather than
+    a counter.
+
+    Fix contributed by Brian Bernstein.
+
+* SQS Transport: Unicode-related fixes (Issue #82).
+
+    Fix contributed by Hong Minhee.
+
+* The Redis version check could crash because of improper handling of types
+  (Issue #63).
+
+* Fixed error with `Resource.force_close_all` when resources
+  were not yet properly initialized (Issue #78).
+
+.. _version-1.4.3:
+
+1.4.3
+=====
+:release-date: 2011-10-27 10:00 P.M BST
+:release-by: Ask Solem
+
+* Fixes bug in ProducerPool where too many resources would be acquired.
+
+.. _version-1.4.2:
+
+1.4.2
+=====
+:release-date: 2011-10-26 05:00 P.M BST
+:release-by: Ask Solem
+
+* Eventio: Polling should ignore `errno.EINTR`.
+
+* SQS: ``str.encode`` only started accepting keyword arguments in
+  Python 2.7.
+
+* The simple_task_queue example didn't run correctly (Issue #72).
+
+    Fix contributed by Stefan Eletzhofer.
+
+* Empty messages raised an exception that could not be handled
+  by `on_decode_error` (Issue #72).
+
+    Fix contributed by Christophe Chauvet.
+
+* CouchDB: Properly authenticate if user/password set (Issue #70).
+
+    Fix contributed by Rafael Duran Castaneda.
+
+* Connection.Consumer had the wrong signature.
+
+    Fix contributed by Pavel Skvazh.
+
+.. _version-1.4.1:
+
+1.4.1
+=====
+:release-date: 2011-09-26 04:00 P.M BST
+:release-by: Ask Solem
+
+* 1.4.0 broke the producer pool, resulting in new connections being
+  established for every acquire.
+
+
+.. _version-1.4.0:
+
+1.4.0
+=====
+:release-date: 2011-09-22 05:00 P.M BST
+:release-by: Ask Solem
+
+* Adds module :mod:`kombu.mixins`.
+
+    This module contains a :class:`~kombu.mixins.ConsumerMixin` class
+    that can be used to easily implement a message consumer
+    thread that consumes messages from one or more
+    :class:`kombu.Consumer` instances.
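+
+    A minimal sketch of such a consumer (the broker URL and the ``tasks``
+    queue are assumptions)::
+
+        from kombu import Connection, Queue
+        from kombu.mixins import ConsumerMixin
+
+        class Worker(ConsumerMixin):
+
+            def __init__(self, connection):
+                self.connection = connection
+
+            def get_consumers(self, Consumer, channel):
+                # One consumer; return more entries to consume
+                # from additional queues.
+                return [Consumer(queues=[Queue('tasks')],
+                                 callbacks=[self.on_task])]
+
+            def on_task(self, body, message):
+                print('Got task: %r' % (body, ))
+                message.ack()
+
+        with Connection('amqp://guest:guest@localhost//') as conn:
+            Worker(conn).run()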
+
+* New example: :ref:`task-queue-example`.
+
+    Using the ``ConsumerMixin``, default channels and
+    the global connection pool to demonstrate new Kombu features.
+
+* The MongoDB transport did not work with MongoDB >= 2.0 (Issue #66).
+
+    Fix contributed by James Turk.
+
+* The redis-py version check did not account for beta identifiers
+  in the version string.
+
+    Fix contributed by David Ziegler.
+
+* Producer and Consumer now accept a connection instance as the
+  first argument.
+
+    The connection's default channel will then be used.
+
+    In addition, shortcut methods have been added to Connection::
+
+        >>> connection.Producer(exchange)
+        >>> connection.Consumer(queues=..., callbacks=...)
+
+* Connection has acquired a ``connected`` attribute that
+  can be used to check if the connection instance has established
+  a connection.
+
+* ``ConnectionPool.acquire_channel`` now returns the connection's
+  default channel rather than establishing a new channel that
+  must be manually handled.
+
+* Added ``kombu.common.maybe_declare``.
+
+    ``maybe_declare(entity)`` declares an entity if it has
+    not previously been declared in the same process.
+
+* :func:`kombu.compat.entry_to_queue` has been moved to :mod:`kombu.common`.
+
+* New module :mod:`kombu.clocks` now contains an implementation
+  of Lamport's logical clock.
+
+.. _version-1.3.5:
+
+1.3.5
+=====
+:release-date: 2011-09-16 06:00 P.M BST
+:release-by: Ask Solem
+
+* Python 3: AMQP_PROTOCOL_HEADER must be bytes, not str.
+
+.. _version-1.3.4:
+
+1.3.4
+=====
+:release-date: 2011-09-16 06:00 P.M BST
+:release-by: Ask Solem
+
+* Fixes syntax error in pools.reset.
+
+
+.. _version-1.3.3:
+
+1.3.3
+=====
+:release-date: 2011-09-15 02:00 P.M BST
+:release-by: Ask Solem
+
+* pools.reset did not support after forker arguments.
+
+.. _version-1.3.2:
+
+1.3.2
+=====
+:release-date: 2011-09-10 01:00 P.M BST
+:release-by: Mher Movsisyan
+
+* Broke Python 2.5 compatibility by importing ``parse_qsl`` from
+  ``urlparse``.
+
+* Connection.default_channel is now closed when the connection is revived
+  after connection failures.
+
+* Pika: Channel now supports the ``connection.client`` attribute
+  as required by the simple interface.
+
+* pools.set_limit now raises an exception if the limit is lower
+  than the previous limit.
+
+* pools.set_limit no longer resets the pools.
+
+.. _version-1.3.1:
+
+1.3.1
+=====
+:release-date: 2011-10-07 03:00 P.M BST
+:release-by: Ask Solem
+
+* The last release broke after-fork pool reinitialization.
+
+* Producer/Consumer now have a ``connection`` attribute,
+  giving access to the :class:`Connection` of the
+  instance.
+
+* Pika: Channels now have access to the underlying
+  :class:`Connection` instance using ``channel.connection.client``.
+
+    This was previously required by the ``Simple`` classes and is now
+    also required by :class:`Consumer` and :class:`Producer`.
+
+* Connection.default_channel is now closed at object revival.
+
+* Adds kombu.clocks.LamportClock.
+
+* compat.entry_to_queue has been moved to the new module
+  :mod:`kombu.common`.
+
+.. _version-1.3.0:
+
+1.3.0
+=====
+:release-date: 2011-10-05 01:00 P.M BST
+:release-by: Ask Solem
+
+* Broker connection info can now be specified using URLs.
+
+    The broker hostname can now be given as a URL of the format::
+
+        transport://user:password@hostname:port/virtual_host
+
+    For example, the default broker is expressed as::
+
+        >>> Connection('amqp://guest:guest@localhost:5672//')
+
+    Transport defaults to amqp, and is not required.
+
+    user, password, port and virtual_host are also not mandatory and
+    will default to the corresponding transport's defaults.
+
+    .. note::
+
+        Note that the path component (virtual_host) always starts with a
+        forward-slash. This is necessary to distinguish between the virtual
+        host '' (empty) and '/', which are both acceptable virtual host
+        names.
+
+        A virtual host of '/' becomes::
+
+            amqp://guest:guest@localhost:5672//
+
+        and a virtual host of '' (empty) becomes::
+
+            amqp://guest:guest@localhost:5672/
+
+        So the leading slash in the path component is **always required**.
+
+* Now comes with default global connection and producer pools.
+
+    To acquire a connection using the connection parameters
+    from a :class:`Connection`::
+
+        >>> from kombu import Connection, connections
+        >>> connection = Connection('amqp://guest:guest@localhost//')
+        >>> with connections[connection].acquire(block=True):
+        ...     # do something with connection
+
+    To acquire a producer using the connection parameters
+    from a :class:`Connection`::
+
+        >>> from kombu import Connection, producers
+        >>> connection = Connection('amqp://guest:guest@localhost//')
+        >>> with producers[connection].acquire(block=True):
+        ...     producer.publish({'hello': 'world'}, exchange='hello')
+
+    Acquiring a producer will in turn also acquire a connection
+    from the associated pool in ``connections``, so the number
+    of producers is bound to the same limit as the number of connections.
+
+    The default limit of 100 connections per connection instance
+    can be changed by doing::
+
+        >>> from kombu import pools
+        >>> pools.set_limit(10)
+
+    The pool can also be forcefully closed by doing::
+
+        >>> from kombu import pools
+        >>> pools.reset()
+
+* SQS Transport: Persistence using SimpleDB is now disabled by default,
+  after reports of unstable SimpleDB connections leading to errors.
+
+* :class:`Producer` can now be used as a context manager.
+
+* ``Producer.__exit__`` now properly calls ``release`` instead of close.
+
+    The previous behavior would lead to a memory leak when using
+    the :class:`kombu.pools.ProducerPool`.
+
+* Now silences all exceptions from `import ctypes` to match the behaviour
+  of the standard Python uuid module, and to avoid passing on MemoryError
+  exceptions on SELinux-enabled systems (Issue #52 + Issue #53).
+
+* ``amqp`` is now an alias to the ``amqplib`` transport.
+
+* ``kombu.syn.detect_environment`` now returns 'default', 'eventlet', or
+  'gevent' depending on what monkey patches have been installed.
+
+* The serialization registry has a new attribute ``type_to_name``, so it is
+  possible to look up the serializer name by content type.
+
+* The exchange argument to ``Producer.publish`` can now be an
+  :class:`Exchange` instance.
+
+* ``compat.Publisher`` now supports the ``channel`` keyword argument.
+
+* Acking a message on some transports could lead to :exc:`KeyError` being
+  raised (Issue #57).
+
+* Connection pool: Connections are no longer instantiated when the pool is
+  created, but instantiated as needed instead.
+
+* Tests now pass on PyPy.
+
+* ``Connection.as_uri`` now includes the password if the keyword argument
+  ``include_password`` is set.
+
+* Virtual transports now come with a default ``default_connection_params``
+  attribute.
+
+.. _version-1.2.1:
+
+1.2.1
+=====
+:release-date: 2011-07-29 12:52 P.M BST
+:release-by: Ask Solem
+
+* Now depends on amqplib >= 1.0.0.
+
+* Redis: Now automatically deletes auto_delete queues at ``basic_cancel``.
+
+* ``serialization.unregister`` added, so it is possible to remove unwanted
+  serializers.
+
+* Fixes MemoryError while importing ctypes on SELinux (Issue #52).
+
+* ``Connection.autoretry`` is a version of ``ensure`` that works
+  with arbitrary functions (i.e. it does not need an associated object
+  that implements the ``revive`` method).
+
+    Example usage:
+
+    .. code-block:: python
+
+        channel = connection.channel()
+        try:
+            ret, channel = connection.autoretry(send_messages, channel=channel)
+        finally:
+            channel.close()
+
+* ``ConnectionPool.acquire`` no longer force-establishes the connection.
+
+    The connection will be established as needed.
+
+* ``Connection.ensure`` now supports an ``on_revive`` callback
+  that is applied whenever the connection is re-established.
+
+* ``Consumer.consuming_from(queue)`` returns True if the Consumer is
+  consuming from ``queue``.
+
+* ``Consumer.cancel_by_queue`` did not remove the queue from ``queues``.
+
+* ``compat.ConsumerSet.add_queue_from_dict`` now automatically declares
+  the queue if ``auto_declare`` is set.
+
+.. _version-1.2.0:
+
+1.2.0
+=====
+:release-date: 2011-07-15 12:00 P.M BST
+:release-by: Ask Solem
+
+* Virtual: Fixes cyclic reference in Channel.close (Issue #49).
+
+* Producer.publish: Can now set additional properties using keyword
+  arguments (Issue #48).
+
+* Adds the Queue.no_ack option to control the no_ack option for individual
+  queues.
+
+* Recent versions broke pylibrabbitmq support.
+
+* SimpleQueue and SimpleBuffer can now be used as contexts.
+
+* Test requirements specify PyYAML==3.09, as 3.10 dropped Python 2.4
+  support.
+
+* Now properly reports default values in Connection.info/.as_uri.
+
+.. _version-1.1.6:
+
+1.1.6
+=====
+:release-date: 2011-06-13 04:00 P.M BST
+:release-by: Ask Solem
+
+* Redis: Fixes issue introduced in 1.1.4, where a redis connection
+  failure could leave the consumer hanging forever.
+
+* SQS: Now supports fanout messaging by using SimpleDB to store routing
+  tables.
+
+    This can be disabled by setting the `supports_fanout` transport option:
+
+        >>> Connection(transport='SQS',
+        ...            transport_options={'supports_fanout': False})
+
+* SQS: Now properly deletes a message when a message is acked.
+
+* SQS: Can now set the Amazon AWS region, by using the ``region``
+  transport option.
+
+* amqplib: Now uses `localhost` as the default hostname instead of raising
+  an error.
+
+.. _version-1.1.5:
+
+1.1.5
+=====
+:release-date: 2011-06-07 06:00 P.M BST
+:release-by: Ask Solem
+
+* Fixes compatibility with redis-py 2.4.4.
+
+.. _version-1.1.4:
+
+1.1.4
+=====
+:release-date: 2011-06-07 04:00 P.M BST
+:release-by: Ask Solem
+
+* Redis transport: Now requires redis-py version 2.4.4 or later.
+
+* New Amazon SQS transport added.
+
+    Usage:
+
+        >>> conn = Connection(transport='SQS',
+        ...                   userid=aws_access_key_id,
+        ...                   password=aws_secret_access_key)
+
+    The environment variables :envvar:`AWS_ACCESS_KEY_ID` and
+    :envvar:`AWS_SECRET_ACCESS_KEY` are also supported.
+
+* librabbitmq transport: Fixes default credentials support.
+
+* amqplib transport: Now supports `login_method` for SSL auth.
+
+    :class:`Connection` now supports the `login_method`
+    keyword argument.
+
+    The default `login_method` is ``AMQPLAIN``.
+
+.. _version-1.1.3:
+
+1.1.3
+=====
+:release-date: 2011-04-21 04:00 P.M CEST
+:release-by: Ask Solem
+
+* Redis: Consuming from multiple connections now works with Eventlet.
+
+* Redis: Can now perform channel operations while the channel is in
+  BRPOP/LISTEN mode (Issue #35).
+
+    Also, the async BRPOP now times out after 1 second; this means that
+    cancelling consuming from a queue/starting consuming from additional
+    queues has a latency of up to one second (BRPOP does not support
+    subsecond timeouts).
+
+* Virtual: Allow channel objects to be closed multiple times without error.
+
+* amqplib: ``AttributeError`` has been added to the list of known
+  connection-related errors (:attr:`Connection.connection_errors`).
+
+* amqplib: Now converts :exc:`SSLError` timeout errors to
+  :exc:`socket.timeout` (http://bugs.python.org/issue10272).
+
+* Ensures cyclic references are destroyed when the connection is closed.
+
+.. _version-1.1.2:
+
+1.1.2
+=====
+:release-date: 2011-04-06 04:00 P.M CEST
+:release-by: Ask Solem
+
+* Redis: Fixes serious issue where messages could be lost.
+
+    The issue could happen if the message exceeded a certain number
+    of kilobytes in size.
+
+    It is recommended that all users of the Redis transport
+    upgrade to this version, even if not currently experiencing any
+    issues.
+
+.. _version-1.1.1:
+
+1.1.1
+=====
+:release-date: 2011-04-05 03:51 P.M CEST
+:release-by: Ask Solem
+
+* 1.1.0 started using ``Queue.LifoQueue``, which is only available
+  in Python 2.6+ (Issue #33). We now ship with our own LifoQueue.
+
+
+.. _version-1.1.0:
+
+1.1.0
+=====
+:release-date: 2011-04-05 01:05 P.M CEST
+:release-by: Ask Solem
+
+.. _v110-important:
+
+Important Notes
+---------------
+
+* Virtual transports: The message body is now base64 encoded by default
+  (Issue #27).
+
+    This should solve problems sending binary data with virtual
+    transports.
+
+    Message compatibility is handled by adding a ``body_encoding``
+    property, so messages sent by older versions are compatible
+    with this release. However, if you are accessing the messages
+    directly rather than using Kombu, then you have to respect
+    the ``body_encoding`` property.
+
+    If you need to disable base64 encoding then you can do so
+    via the transport options::
+
+        Connection(transport='...',
+                   transport_options={'body_encoding': None})
+
+    **For transport authors**:
+
+        You don't have to change anything in your custom transports,
+        as this is handled automatically by the base class.
+
+        If you want to use a different encoder you can do so by adding
+        a key to ``Channel.codecs``. The default encoding is specified
+        by the ``Channel.body_encoding`` attribute.
+
+        A new codec must provide two methods: ``encode(data)`` and
+        ``decode(data)``.
+
+* ConnectionPool/ChannelPool/Resource: Setting ``limit=None`` (or 0)
+  now disables pool semantics, and will establish and close
+  the resource whenever acquired or released.
+
+* ConnectionPool/ChannelPool/Resource: Now uses a LIFO queue
+  instead of the previous FIFO behavior.
+
+    This means that the last resource released will be the one
+    acquired next. I.e. if only a single thread is using the pool,
+    this means only a single connection will ever be used.
+
+* Connection: Cloned connections did not inherit transport_options
+  (``__copy__``).
+
+* contrib/requirements is now located in the top directory
+  of the distribution.
+
+* MongoDB: Now supports authentication using the ``userid`` and ``password``
+  arguments to :class:`Connection` (Issue #30).
+
+* Connection: Default authentication credentials are now delegated to
+  the individual transports.
+
+    This means that the ``userid`` and ``password`` arguments to
+    Connection are no longer *guest/guest* by default.
+
+    The amqplib and pika transports will still have the default
+    credentials.
+
+* :meth:`Consumer.__exit__` did not have the correct signature (Issue #32).
+
+* Channel objects now have a ``channel_id`` attribute.
+
+* MongoDB: Version sniffing broke with development versions of
+  mongod (Issue #29).
+
+* The new environment variable :envvar:`KOMBU_LOG_CONNECTION` will now emit
+  debug log messages for connection-related actions.
+
+    :envvar:`KOMBU_LOG_DEBUG` will also enable :envvar:`KOMBU_LOG_CONNECTION`.
+
+.. _version-1.0.7:
+
+1.0.7
+=====
+:release-date: 2011-03-28 05:45 P.M CEST
+:release-by: Ask Solem
+
+* Now depends on anyjson 0.3.1.
+
+    cjson is no longer a recommended json implementation, and anyjson
+    will now emit a deprecation warning if used.
+
+* Please note that the Pika backend only works with version 0.5.2.
+
+    The latest version (0.9.x) drastically changed the API, and it is not
+    compatible yet.
+
+* on_decode_error is now called for exceptions in message_to_python
+  (Issue #24).
+
+* Redis: did not respect QoS settings.
+
+* Redis: Creating a connection now ensures the connection is established.
+
+    This means ``Connection.ensure_connection`` works properly with
+    Redis.
+
+* The consumer_tag argument to ``Queue.consume`` can't be :const:`None`
+  (Issue #21).
+
+    A None value is now automatically converted to an empty string.
+    An empty string will make the server generate a unique tag.
+
+* Connection now supports a ``transport_options`` argument.
+
+    This can be used to pass additional arguments to transports.
+
+* Pika: ``drain_events`` raised :exc:`socket.timeout` even if no timeout
+  was set (Issue #8).
+
+.. _version-1.0.6:
+
+1.0.6
+=====
+:release-date: 2011-03-22 04:00 P.M CET
+:release-by: Ask Solem
+
+* The ``delivery_mode`` aliases (persistent/transient) were not automatically
+  converted to integer, and would cause a crash if using the amqplib
+  transport.
+
+* Redis: The redis-py :exc:`InvalidData` exception suddenly changed name to
+  :exc:`DataError`.
+
+* The :envvar:`KOMBU_LOG_DEBUG` environment variable can now be set to log
+  all channel method calls.
+
+    Support for the following environment variables has been added:
+
+    * :envvar:`KOMBU_LOG_CHANNEL` will wrap channels in an object that
+      logs every method call.
+
+    * :envvar:`KOMBU_LOG_DEBUG` both enables channel logging and configures
+      the root logger to emit messages to standard error.
+
+    **Example Usage**::
+
+        $ KOMBU_LOG_DEBUG=1 python
+        >>> from kombu import Connection
+        >>> conn = Connection()
+        >>> channel = conn.channel()
+        Start from server, version: 8.0, properties:
+            {u'product': 'RabbitMQ',.............. }
+        Open OK! known_hosts []
+        using channel_id: 1
+        Channel open
+        >>> channel.queue_declare('myq', passive=True)
+        [Kombu channel:1] queue_declare('myq', passive=True)
+        (u'myq', 0, 1)
+
+.. _version-1.0.5:
+
+1.0.5
+=====
+:release-date: 2011-03-17 04:00 P.M CET
+:release-by: Ask Solem
+
+* Fixed memory leak when creating virtual channels. All virtual transports
+  affected (redis, mongodb, memory, django, sqlalchemy, couchdb, beanstalk).
+
+* Virtual Transports: Fixed potential race condition when acking messages.
+
+    If you have been affected by this, the error would show itself as an
+    exception raised by the OrderedDict implementation (``object no longer
+    exists``).
+
+* The MongoDB transport requires the ``findandmodify`` command only available
+  in MongoDB 1.3+, so it now raises an exception if connected to an
+  incompatible server version.
+
+* Virtual Transports: ``basic.cancel`` should not try to remove an unknown
+  consumer tag.
+
+.. _version-1.0.4:
+
+1.0.4
+=====
+:release-date: 2011-02-28 04:00 P.M CET
+:release-by: Ask Solem
+
+* Added Transport.polling_interval.
+
+    Used by django-kombu to increase the time to sleep between SELECTs when
+    there are no messages in the queue.
+
+    Users of django-kombu should upgrade to django-kombu v0.9.2.
+
+.. _version-1.0.3:
+
+1.0.3
+=====
+:release-date: 2011-02-12 04:00 P.M CET
+:release-by: Ask Solem
+
+* ConnectionPool: Re-connect if the amqplib connection is closed.
+
+* Adds ``Queue.as_dict`` + ``Exchange.as_dict``.
+
+* Copyright headers updated to include 2011.
+
+.. _version-1.0.2:
+
+1.0.2
+=====
+:release-date: 2011-01-31 10:45 P.M CET
+:release-by: Ask Solem
+
+* amqplib: Message properties were not set properly.
+* Ghettoq backend names are now automatically translated to the new names.
+
+.. _version-1.0.1:
+
+1.0.1
+=====
+:release-date: 2011-01-28 12:00 P.M CET
+:release-by: Ask Solem
+
+* Redis: Now works with Linux (epoll).
+
+.. _version-1.0.0:
+
+1.0.0
+=====
+:release-date: 2011-01-27 12:00 P.M CET
+:release-by: Ask Solem
+
+* Initial release.
+
+.. _version-0.1.0:
+
+0.1.0
+=====
+:release-date: 2010-07-22 04:20 P.M CET
+:release-by: Ask Solem
+
+* Initial fork of carrot.
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 0000000..a61b832
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+
+# If your extensions are in another directory, add it here. If the directory
+# is relative to the documentation root, use os.path.abspath to make it
+# absolute, like shown here.
+sys.path.append(os.path.join(os.pardir, "tests"))
+import kombu
+
+from django.conf import settings
+if not settings.configured:
+    settings.configure()
+
+# General configuration
+# ---------------------
+
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['.templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = 'Kombu'
+copyright = '2009-2014, Ask Solem'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = ".".join(map(str, kombu.VERSION[0:2]))
+# The full version, including alpha/beta/rc tags.
+release = kombu.__version__
+
+exclude_trees = ['.build']
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+add_function_parentheses = True
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'colorful'
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['.static']
+
+html_use_smartypants = True
+
+# If false, no module index is generated.
+html_use_modindex = True
+
+# If false, no index is generated.
+html_use_index = True
+
+latex_documents = [
+    ('index', 'Kombu.tex', 'Kombu Documentation',
+     'Ask Solem', 'manual'),
+]
+
+html_theme = "celery"
+html_theme_path = ["_theme"]
+html_sidebars = {
+    'index': ['sidebarintro.html', 'sourcelink.html', 'searchbox.html'],
+    '**': ['sidebarlogo.html', 'localtoc.html', 'relations.html',
+           'sourcelink.html', 'searchbox.html'],
+}
diff --git a/docs/faq.rst b/docs/faq.rst
new file mode 100644
index 0000000..8275c82
--- /dev/null
+++ b/docs/faq.rst
@@ -0,0 +1,16 @@
+============================
+ Frequently Asked Questions
+============================
+
+Questions
+=========
+
+Q: Message.reject doesn't work?
+--------------------------------------
+**Answer**: Earlier versions of RabbitMQ did not implement ``basic.reject``,
+so make sure your version is recent enough to support it.
+
+Q: Message.requeue doesn't work?
+--------------------------------------
+
+**Answer**: See `Message.reject doesn't work?`_
diff --git a/docs/images/kombu.jpg b/docs/images/kombu.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..78b08e77d8496548c34718a721b6f31ca5ad7730
GIT binary patch
literal 115763
(binary image data omitted)
diff --git a/docs/images/kombu.jpg b/docs/images/kombu.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..78b08e77d8496548c34718a721b6f31ca5ad7730
GIT binary patch
literal 115763
[base85-encoded JPEG data omitted]

s{D%p+8m_mBd8} zKu=?D4*s%s@R$FxZwUd-8fR7+)nwaFlZXCy?hxOYc?Zg{-cmT>oPBu&n3Z#IR=}0u zgg%`WM?0L1VsQuEc5rIwSC?}SKV2Ylnem2J`~PeZ{|mpNtDfUA(P)jMoI7=qdr7XK z^H`s@q6_0tV@&*39n-3K{Zta&)PZ7nx$zOP{?EDNl1W4tc`pEj|J-;R^fEKHdosUQ zKWqe%b7WPh;fFG?Xr<%|PnR!_YaV|38)tL8JCITAfdQ+_v2U7Fe>8#w4S38mQGtmql9=8y}F)vWt?t$RJ$-H@KUo*Qp%m07uuxS&S@L zYfVABkFF$hpE20lzdL#|B{155H@D_Kk?W}Fs?ren-46dKb-iq`HsjF(@9;Ad550p( zji6(r0BfCH>xTcAFGic^5(vW3{`c_V9M$D@r+AbohqxaaPh(5>u;SPUa(nw6 zAp{?@>b6zAF+y>`hAu7+T4fi~t^~Vvu^l(QOgn02MN%e;X4WKn!ri5DJuJNOP@X$@CSoWm) zcTVO>O<}KNAkfjsf2_1`!=5=GKsQ0*Pns;vIXPBq8+gn0!EJ_S5#GG~resOnX&NQV zNwxAuV&~afp?Vn5YpRw{Ln`V6 z_T=R~<_Ye4;1=yO&L`==mjSZVeYUXJH6*siUG}d>y|r*EX3J0 z2>eqa>QXT^D2aV|;S|2dqwy(t z_&u5B&idZ8*-6|8?6e5;pjVWOe^NXeXmR0CTAH6ekDltZo6H#q?(PnkC_dc-BP@r*5=BN zez534mU;Q@_t?aHWMJKq`jhiZBEdaY?fx&spyl$95+S`5W`(~@!rt}ZpcRH=wIJG~ zoD<=O%!34Uhpds1*jSzyRA$P$~4=MRS#5HHWC>CdEQWgkE z3>7`R1zg+-*rBCy*rV6dkEMMYv&XIgFA8e@^1fbt>n2CB{NZD!6Uia(o_#DYSzJyb z>TK}$*qP=xEm@~-P0#+QT5L3{__cIGyXHHLlFoL&IpL1w$kHO)KYAwptH*J#?GqsF zt5VQ@J^x6yoUDWXt0e_;yF}DIlHCKA{N0}2b9$mgZ0zgQ9WN{+?ofn z3sQ*bVZL=1iiV7`_2o?zS@|1-de>mQB@p z=8wNUK`hCv6*GHj&zFbeABFVaAB9jwAE}aIG&JWwIBagRn-Y?f`1sG&H!4S{MHBtA zvlN{V{a^g?@x$78{*_Hp5bF5Qch!nkaujEWBP0pam7B?b3UMv?;DUSOjJwNpnrZGb zA^SRwd8r;iI-ve%O@gzF?`e8Qk$YU2X6w1g0Va`dp4txxNn3$&t31I1y zJV=fHO#9JCl=35xHRlu2dIqPfzOhK0_xjn1k#l`ck$#P8m!t!41fb@x7L@Txe8zl~ z*6!|KaP5-ml6mnr&Ceoj^yyA1kU_35?dU}HisnPQd1B>>@tBbbdFo#U+IB5k* zlhbE{q87Oiddn2d^T`_GiH`Lj*z1-fwhs_}}p9-JyxM>hcCHct0NPXDtS zF8<&6N5(35|D2Rv-`93TB-JRSBTE(~SfpSkMXfr{doYZhm2*=WlZyxuzg!>BxF@IK?!6r&g_fzuyD{}PYJ#m@$$1ciC zPHHG$0P0jfZU$v&eO)&3&V+mFR?nTyZ`Df4nt!vws0p0odqF0#se_RnM516;o9r^h z*fL!q8ZRWKhIiV|;;t{Vpfc_mznp?Gu+!v*JX|`~J$lV(`%J;856WU_aJubKCCRF; zbzxT_Ei(Z!xRw=Ybt3E^UuCGX=>Vf8-=IX1MgDeIr4Q2I0|U9~5W1{9LKm$`b z_~M*o#t6G5?gC0_h1n-~TUvOPl`L>6eWLfcsD-pA1vz`Jr@7?hff*QVhfryk*|j4( zA6r)rQCi6{?z@Dg4G!F6P%o&6XA7irg|473X1D0XemrbaXz}Cpuf|b#ph@onY&usK zw&36bgbEeLWPu<@z3G5oI$}AuXN4cGIo^-m)v5rFE*F#PfEdg46B26TZ+r3X`Z4KB z$DK^3(xe3|5;=~t7WrReDC^JEHH@n#g?zI9gA-{AOLZMykUgQNvaZHG7`gPX+7y5> zeZC>O9q}WwnoV1tGPC^a(1A&e8_e1yYtHEue{+2F1XlzkK6*GJ-hfkgU~h3$U`yhC z3Cw)eufj|rPo0t2)ZzgIA8a1-%&z44O+%oTyB7(>lHCR1wPiI50fPjLK z2tE0;8d+ga<^J%|&(42ylS8M|^+8w%zrqeTeGfNXLNq1vR++3duT3m=-i8(&O_m@> z3x3C7`GY^{ax>Kyq0{U749sZ}cVJ@FZM99m9rvs#pWE;c?LD1ZF@W-h#-Pu0Hs$6MUbv^5Cj5B6{LyOLziMG5kV0F0SSgk5$S{w znt&!GAVrEuQ51rqV+e3)p+*7(wmQ=r^uYl zbzIy1U3IW??sSC0j?FR<{G<*M|E>8=5ne3_ z3EIZqx+5vRp_$7@=9vOUP!@+{?S;H$G_6HAtA+R~`5p{nxl1)4eB=alluFF-)tM@1!Vu zOhi;tN~$3p*&tS+jKds09wL`DBMDt!svRvS@$sTQ6f9c1wk&VYjA?*#N^q1j0UxgEX(7?V7&+sW-*l$y?WbE=N?dkE}#*WEekLGh{*zx>A?! 
z9=}eM7;l#4^I)+=j}evJae`5l;VLt_q+U`jzh~&TnEVrDsZGV~2w_x5E%f_j#O&vo z$k#77_>r@XFjN)_9tT{%rH@QIde~slrI8b*4nL%JLQFDIyn1URZ-*fKpZcK-3CVs2 z6SmUzo`bwT%{lQe`d>z2iVr54&MfEswHBQ1`I{eh?^tM8E#qyuLb75^QzGL8P(fqD zlN0_y%ItFy5wv~{>BzZzA1oR8U#h!DPNf=&V;|UQm-5ud)E0%S(mGmaG+O5B4}Nx_ zob|D}dr8^+W(JJHk9L#&5AH3xo#dm0ReKLyBL0|qJG?O#PTf?@^9*~^Qt=z`V7|>Y zp*p@2dp?l0VAHkv*Z7;5_sByxcdZA}^{(CgCG5BbpZA%`?WZ|Rj(&U4vfLuqzU32VFPReDz3IyT$^lO(;_f z6b>EI>oJIU&%i|PCtjKwWzqnnxn1OFCCD!Oif^X0d1?mp4IY1WI`7R}6-2AGQkg3B zXr<;7)HRnA?DKPw-gs(YEWQqkVpgk98{!vn9rTH|wX&&M13xt&`3jFWUMX-FJc&SK z=Qxjd7S`mIdgOhFQ8RA^Hy&>&rkE?A*sObK8o9b8&>F!Urjm_|woj6&V3-}cdlUwI zAhh~GKG>3&YQSoZ;MjYhDRw3jVlaOjwVDZ0n<9LY3Y|VftqJ`gytF-6_QT}hgw-yBv{;-r@;eKd~O&)Kjd z(gHiEO7_tgPW}eC{xH9TEG8)1J0C)bJliKN>9z`__W2G=e5mRKY4&E)ZaUB(y(qhI zt7#o`CZUUikgz|jaO|jaM(rTZ5ywCuRwU3oHe7!L0CsyKU$oK@AB37jJ1gS@8!Ejb zW~2G6x-)=LrF(O5n^*P(BSf|}Nqf}$+5j~vHhfq5vgfb)1;;!yWE@fRrqC9 zJCN_r2wcJBd+Bve7xpOYW{aisfPFSmZAQ zZl(K4k0i?-oGaQ{qn^xXsIF;ci0IrNye#~Yz)*fQHFJXnk+|#@>mU-Xxxz}_u2Y}Q zDHC8Vk%CTRJKv>PSt`c0%t>~5IsUx90jIythRxsBJt+#xM9P{i)_S=)xCymPZ}0i0 zJ&7?+v}5YScniRcf#+^AQjZiqx0XML&qDCq=|sGY`-2QJSLmqt>t3{Kj-i(6fW7T^ z)4*{dy9S=(n=N@?^z(98EupSH`)@u|S)EE23BiK{zrg~se zArTcp&$o5{=C)`v9Ub7~J6PS}WqS%J(qm@o~Z;^Ku++lW>W;eKtzP$cL64TuL zxzs6IR=77ivuMEL`$qXs-I0*eMa>G5u*V4jQSSkuDA`U7T(7nu{`03u5-ho{(chWj z;2F|ub*=w5Ann#2r_03EtB9VTxuG6eOzd73=5YKP-r~H#nJukz<`EuZTi^LIr|rjv z(v}Dr)w>&lazUmamoaEIGc-pqPfkV1v)qDD6jkM!#G#9w#|$s~iGYKnwjX{%B%3Z^KmeANYTz(h_<^UTzME|=A!kSJgq64l!w2JeWir0GcN+{kLu3kaHlu!iXS zK#2aDPp454c1|LGxdOCmMSrfWtR!<62U7rRt-7N;^kDU(r=^6>1v$0u13?0iag8G_ z4Wz|q$rQr5K!i!UWi1`c87Xd-@JM#k(@}lNqY16yckqRl1M{ucC!F2G`+R)_oWZdj z0pGu4;S(0y)Y5masQb%GszTyuG>(Gp4o{^Cg-8HBs!QU6$kk(LO#FxK;Fq6vl)n3k z6>xW$h8ylJytOqJiWPFqitAL@ha!Bu6G^F8s?rX6nb}-)Q*3QAu>~K>63)qi7}sIQ zQ{u63o~fGh`@)Wq{(YQP5vpHN-_9d%^e0=7b;C#Uv-(}EO`CpzN1)UyqI39~LrbD_ zEdElzitryts0%UJmg1%H67Ci{0&4%X2J1rRO5Pye^JNvsIhK62T|O33kP!QA1~A^{ zl;wK{m|Xa2NqyOtyL{TS6BH+J{Gm=?oATV8zG^(}p7O0I3Yh$n_y9(Dn@uEkp&JMwa^sz4KIRm!0=g;y~HZfSdj zFGkUNmRu0vLdUB>2bdbs<_KA)|8h>9B8yQr>e~`BH)_~c7I6W7r7F$AOgXLlK9<{O z<`6EY+NjC~+)O>tSQ5mV#LMI(Dnd-ohnKO!Qjl$~=RaNn;HHRBHDOrE)!?S`JCBz- za8mA}B7^v}<*Sb7<}+}sh&DqSul=voZz9D0!5#jGW-{=^)8O>a_B1(dC1vc}+UG__ zSIr2V5blc#DPevOF>n0b7@pSvhXy=ubxRse7=G76N)oNg0 z9yxFq$@VE#fGNwv_0)A$_7gT97UvkOh z?gg3OfW6(mWm|id88hqd0EnW;>p7w!uRoA`^d5@t&*rVzzN-FLLdT}L5o9N@-0 zA{!6ims)uISCS3mbEJy<8IHx!p6zr)4vhC6JR+rReA&aF2j9Xy8jS1Ay$nKzYlj~>dQYR*ToEr+>jIQ#}2c};>q zCo{gPi^$m&MykxVW`*7f>}(jLMhYjc8tmi@I6T>_NE4f>^vajQ$H``HsVhWWo|3=i zvT-J_E@2}eZ`104y%uW2VNqe$ZeNZ2v+$ZlJKG6@7<$X6GZX+pIR>K8H>7s4_O;R%%#lG!Hca+dX^A^KO#A38);vxvPQo?d3>ONt3OZG+NAg6P?m zk!{h^(aM^@4U?<*)> zTKd>m)`rKwy4@mL5z0@}$rbS6tbTo81k;;nRK@L+ma+*M3*@{w%*B(^1Y-#L$ss~L z{JMQ5lgEItly}7p!K+r~5&P#UNe-uUwZ+b^m)9lu{Uhjo^D}qFn@i-oo|HoVWn*bC41#QJ+ A%K!iX literal 0 HcmV?d00001 diff --git a/docs/images/kombusmall.jpg b/docs/images/kombusmall.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fd3b8f8e563a519210c0fc6870600034e7a14058 GIT binary patch literal 28752 zcmbTd1wdR)@-KP@cXxMpcS3Lo5ZoaI9VWP2NC*LfOCU&q;O_3hA-D&3cXxP`Z+E}l zx9@-7yYDvCIX|ka`&3o;oT;vEo_{~D09ZXMy%{H4Q+o05HL>@S2M1pZx#b8@9Qlvnv3AUPBx3T3EW6LvbJ!zw>Z){KG$i zV!XGIKNtq?4|aws2*pHyu=PJM+g~#Oz%Tw_3p;xYsLUUo9WCrE{@^Ys4smz0gksoN zP#okAw)BAFNhqd!=Vk|n;u|Q&v$HgF0RT9(KYUk9a~mk;f?_mhEe&ZX76SkzH0%Gs zZ~p_kT6#kD1ORCVM=xivwT&wklQ}aLx3I7fm4cBgb z0QiT^e_8=Je|$>?b+Q1ruz&zN4+qr!f0zH)%D<)l-@%`;{fET4`d?!PA{hKP@850z z&2z{E0HI5$Z=(Ordz%CRb)Nx%Xzt%Ux*q_583+KiBmZF!!JqkJyuW=%C1Yu2=Jw8&>W@>+9o{*(Ia9efnweWtvH$lZ 
z{y%Q`A8P%F9&8$xR+i3|_RvS^KxY})-WodG_7-4Qu!B7n*#3Vt!vBZO{zHd9@E`LU z3UITZ06bF;0BZyffIS=p;BZj^Sfey(4e0N2dxfk6{Q2bRQf~ia-a|38{;%u*?E(`8 z{S(FoY)$otEv=zNW$xze{s%)p6Mru701AK!-~q${1wach0qg(|AP9&8F92CU2~Y#H z0DZs&Fb8Y^JHQ$60DOUuKoAfHL<8|aGLQ~r0|h`SPzlrl%|JWQ0}KG8z!WeKtN@$9 zK5znD0r$`s86pTBgbN}DQG)0}tRNnc5J&7;+c}7)}@=7%3QK7%doM7;6|O7;l(B zm`IpJm`s>rm|B>2m;sn6m=&0Pm`hjy78RBNmKv5FRtQ!aRt?qw)(X}c_C0JUYyxa1 zY$@6Gu94;Ic96OvSoC2ISoH?8m+6NR&uCNODL9NcKnp zNC`-VNUccYNZUyF$XLkq$RfyU$QH<6$Wh4I$c@M&$eYObDA*{BDB>tuD7GjcQ4&!~ zQMyqUP|i_NQE5}>(eluKqRpb6qoboU zpua#jKzButL@z+^L|;O`!NA4fz)-}n!0^XN!KlR;$2h`7#-zuT!ZgA3#!SGh!W_Xo zz(U4iz>>x?!+MXEg4KXEgLR3Gi_ML#hHZx(j$Me|kG+F~h{J#*i(`Qkh?9lWiL;Ii zi%W|ujcbnk2{#+J8+Q{A0gn++5ziJc46g)l81Doh2cHjL2j3Gv1-}JUnyECb}4ZwB`Cp^Un$!t_o(oxq^RCeB~o=!oluieD^R;pXHXAQ-_tPCXwwAH zl+i5GqR(5)yd&OX!L|jEGMUF+;MIoYvqB~-YVwPgrV(a2G;%4F*;wutV z5^p4aNUTUwOPWb$O0K=2ePQt;_r)jBmmO;IgQZSghZYp2&u>Imwu)l<~BG`KZKN%1=v?c{=*H@<=yB@#=?&9s(Bv z4jKN!`z7hiQ>aDguQ0B#gs_Kj^YC90+!2Wp&yiM(Q*sv1Y^wO^^ehJL+>H;wO4;7dsVhVsqr+e+fA#QG$h zkvg9CBCRx?JUuM^?g#kC?+p2j+DwMbge-(C_pGgKo$THmp`3zTlH9P|$2|MIm3)o- zt^&b=fm8;Y^PPH~lU?dv!`;f=eLZqLoxM`MZGGZ>&HW<%4Znnb)eQ&?)DH3w)(r6v z)eH*^*NzB|)Q<{}HjasnwT?@Uw@=7S^h_#F4os;|jZJG$|DG|LS@~`8duR6D?CG5Q z-2MEA1^9)~MXbeyC9UoKtQ zUEN=Y-Vop9-wNIK-s#?L-+MeDJ|sReKQ=xoKP^1lJwHFM!aT@&f~^2RMFn7iMgvFy zG6)m8hJePjQ1S+0{e@AWI2?onz(Vh!KT!sV1R6K|Bi={=k^YU-KurIXf%X96{;Mq+ zR4y09^B2a0;*MbOUxCM;SY;YYfU2UhnktpFn#OB24Jj=}HPwG|WcdGt?|;@mf5kO_ z-K8uX-da+|R;q0$B0TR3>Q06bh=LLyweB0PLl+&m&&+#=l2_S6B% z|I!Y|CLr~1>=;7xPfelSM*{#N^ajVp!^0g7`!7vlJfq?M#^EtAf4BV+3kQvv{?ZC2 zKNkLPet9h7Kl_CGCKl=6{l_Bz6H`L@D1Xb>#|HiNT<9tY=4Wj5U(bRSf+`&w^B*|& z-}uMB^yKE^`T_c9pHM&kfcck4K`(y5{u^f!{!<1z8rlDnhbo>8^H2UC3=NR}VG9() z{jJ+UHvB*N98f;Of5`mH#y@TU!^Z4?+4zT#@*nbO|Hja&KjqKJ`$6f?%z*#bsB!UA zaYM%r`pfoZP(pg2KwG z>YCcR`i91i&aUpB-oE}{;}erp(=)$k=T_I&H#WDncXszq&(1F{udZ)y@BY{Yos0i4 z>u<~c%PwrFT`+KPuyBZf>;l1fKnWHb4xXAD0Y^$5(aZ^#hUX&^-piPbigsjLUX2s{ zx6WfI1ay3>^rwGJ`^&QbJ;MV2k1YG!uz%Y%2cW@%po0gC4M+laxuHxyVE(4QQ-MlX zWu_zeih6Xaml?Gy6qh6Z9iAJMH}SPT?;rd>10HxfYQZ%FcJ`C~2OIiZjJ?7|<#O8YrsN!m4L^n0aZoPS=0BW1i}Lq?M*f`f6BSFOyc({f=&m z@V-gT+RAZ;u{;@s*=Qv<&`(nu9QK{?x z?=rS*ZX-tN*f11?lQBsrEAN!8K(YE4!b+K>(IVS#D z5Jn1SbQWpr%R1k_D7C#enR57ov-Ft8`wXxhesg#d^D4F0o~%hRoN&nUUOB^ zc8!*bdl04-Cf~=n*En4}I{$H^`+3x(we?;m>jdunMx7y{?4gZ^no!VYZ}^Dyey8{7 zl62($Hhot!0-~kfljHIXz|8sQc-eIyHP)UCc%6F4)ZyCCv0apLIeHziG}?+JJ_EX^ zg}>9~#}x1-rcn#;$41Nb5=sjOo&k{mhWE}XMR}Wt`opU9Ba7#QVNnm4$+W3Qc+`fh z7n|-AquEo~scy%Ne0gIza{7aNso~PY3!<}9F;FSmMbL4-`<-(=-;{!~RqN%Gm2>A* za5q7V<}*M$O?6sw_6(F&)jIv|+4N7dy)Y3Bc?L=*jzG`An<9UMGcW#SoLMPVIbv1505}6i7;#?vZuy zvWy}XJsC9xf;yzdRw=%XS~@>1zWYmsLAnQKgx3b-b-3B zje7=+=bwSlGL}=-xK(udF7J|_EXaDwegOyN5W%|+V0MG?ULro8Fwhs9)oIm~()@l% z7=P^0+=sLO8Hm~LoF^XK~JQvTYK2&eL&<%dvRlAvF&tAH&OP9Qyd)6(!WGr;2WP8cts)jIydPx=Fn%)hbPRFjsDGfPu|`VW-*6ms`u6V4#9VdF(jBl zBtyf5TOyJ9eGQjl7kR@a(CG$M6cf>aV~F#6W}7hiQ{EaU8kp)C0N+H(*0Q#%du$7C zQtWj{tziZq8Tr5Y4OyacSyr9UV|={YaJnH0dv(%%81oE#vHtvk|HGM^t>9{QVn_7| z;;n+K+Zxha>^Y&8KpWnf*HPR5aX!>&=pcQiZRiR1uG#!_GW4Rk?c(dq+Us|x2|E%txj@$R=-f1nuvHNm8E7A zAoM=aWv<~+O*yRNDz;pa9}{*V1|u(fK``m+%>pu3n$veId8)}}bP5$*)Fz3{uH${)Qr5%^aY7w26%bGDmb50>@N3%=a z7`LGlO3yezyNx3!KbRV_ua~SE?>tjRGm>RsOJabHkPe1@Oe2Ux9E_9X#X%%@G8*Oy zkBz##xq52yK8T?B-8J~c+vg|G z!z1hSOT_3`n=}($MRg>XQj<0BlBZuCytwx`B)#%mTs%#4N{&&1^ph_AdJ3j<;$Nq< zCa(S_GiUk{Plo_2U(a<>m@r@6HV|`Ab8fu9hf-xC(VJA^8R*E9v66G-CP8Zsf0-HO zE{3w{mQHD7U}ke@d})~RK(HW_w_~CNMwn<$$Y>FIPRJVNOtr|=Nbw~ko4qV)Og)7L$ZzU~DO6!nqx8D8p0PGo91IP9t{q>+{7L(sOT(KbkH#_Sa1w`XvtV{Q^w+PT6a 
zd+)5_U;y0Dg*2BpqG%WhY6j;HND$^9p0T~!SS5XR=8bGCl`)^9oRYVSkj9IXZ(PNj z$-V0zT78$htK(~@mX^J1+9dMES6)$rWp-%MBQlKyH?^16jGWk@U*Nt|>^yh9+Et;~ z%BlA3mXxgKXnBq02eQW8-z~y*>vIN6qlOrjW9s|$GYLE#T4*V6u=5kw;c#y!yD{sw zRhWC6xi52NqkkUB3D#|aOGX&uj@4v=Z5IC(ZHqavi2HKn3j=8be1N<@@1w3@C$sr60k!I{UmkpYf?VDdXo()r@1(5q2BQy-}EPFhplhL(mDnI$R9# z*^(jZEp3{i1zs+B5?$xGUNQG5>x8vggq{%>A8CoV462}o`V`=A8EE#Kmp-seF_>Ky z`OLRrE~E`6QA73fdYBeif1?1GhWS_jC!)*tq$zsAr zNIc~gt77Jrn@{n3N=WLisk8^SOVLuE0hVD?9LUJs{kTX$+m&~;-`Y%R(J}amLVhBY z>j%cCPy^XbshlF=fWIT@f5p8_KQMfHAdSPb-UO+*5=la3gl(t%ku^Ct!S`yip=vj> zHLkUF4#JV!bRU^6XeVNZd`!dS%qod{?_T*2x7q}LK6wVZ1Jn?^VlW?kCnw{=NsX~v zalYHmd=`E;XZmHvb7>3JA7jti#AIofZuM;}ggnzL3QmsbanvVfuF4m8joe3@XT&aN zw)Havsa7HTE87Sc8Kux!Z=0Bobq{I!g8uGmMYMgY+C3F0*%Vd8(^O_GLtw8&NY<|V^b<{@ zZ7X$ZQbu)^`j!In?d#7g@WcaYHMWWms?&0Y{%BzLao7M3w-+~8KYK8z3p_K|9ME@@ zYl|{|OEct9qdGau>1+P*rO5j2v@nCbVp!s@f9lH9?dRfx|G)=+*D4jF1syZRZFsg*MUNZOYP2rQ%MLi*cnS0VFwMI{Qk3t~`M;7?|_>kZkjTZ@xa~t&4{2W|6OUHnPjwVrqhTUG#>sap&-;uJAeAUp{+l^!i$$!(H+$o7Q)0?%q{ax{Jvb^VuX!=A zXi$u1s0G7a50TL*F^aX6B;-#KvarC3*Jp3VsIK{c*~ScrY1k%53^sl~_~bEHYSd4? zO+>u%9)FQUWuMW3K2yTJRU);)jKbq#N__iF&GPy-EsP;7qnB0b8JHt!u1x1%6qxN$ z+-7O6WtzzKL)27m!Rea+g;(?=vGtOc=#3iV#8MMF?W<~KQDhmZO;7bO#dC^JHD1dh zmv;3012gmMVj(eGCQv_B z#t?z})wSAhRPNTCLe)KCuXEpU+C*$pE}o4r;VN2%HK?w>`ipj^)OC=!S_wb?$+9%@ z+hb0f^}>16{R>gqDDBn>yb-Z)SovM<&M#}Hvwq8P$6W4f)TVnpre3A0sT1g&t{hAp z#$r=Sg!1Z{5Gp*FJJEM8nHYk-6>%1gOw^j~VciitVaei*5hB*oF>iY8q*YF~a(GP& z3A@VUaejzZTIZK&Gv)atiMh!Y$G6I+G;q8PL~X;tq**A$dh=4NZgH{NVtaBrwdQ;6 z%8#VQ^K;&Y-W!mf$e;-SW&BZ!gX<^F+IjW?!n17hu6HeD7+pH-hg;Rta~~RG$;wlTxA&N)VOFx+Haq1;)Qh_B#`ZqSVTu9Y1;2W{nfUS6!751c4LAmt&(ird zMT7|?QxTj8nU=I7oIov7-=t53zvemg>#W!CSDLn{Il(`k^Hp0*QKq;2S$#GKNeMh` zKxCaR=;p{}QAxEZ@u0{d6Bx0L}He|H#S4Qij`-k+gqP+ks` zK##p*Byn%@+N4Zy#NiUZiiVPAKG|GbmXVy9R}dH=6ekvmDpTq&N{uc; z%E^O<=BtAj6a1`S=#*1twQ20aEBj^<%V5Qs?Oj9cet-j?%paPx!kOvTTWk#Q?4&iZ zMf)SNi7^+~V@AvreT$s3c{%6;`-=6EsF=hiowG`Gl9P2SC-QvoOI+t^Jy?eLo13r# zXmvzYMYi+sT@>r8C!4W-a(k#7>X@D0!T+kvN^gdRzpd!Cx7`eMV93I{_iGk;7d-I! 
zH4JYg;tW4VZRPHXXVAQwWx$niri^E#`>T7o<5CP21D(l^8buWYwW>!^d#|=mhSpJE z&NZ*3K<>`Bv9~@Ci{Ic5!&%*4sEX6P-6<7H#i?aXi* zLCq)K(N^l3q0zcp2?lWK2JPoafw#c55bSALhVQ$Bgz0y|7>sa~KW73TUW~a3t%fO` zY<*#Tpj|RIF%qv1kZVqjTr>T?f*Vc}S~}`&wD+?!9h+1|Zf>!<#KCyk@{Ww4C10^@ zukPD$6UD(p=)&#mGFWI1W~5^__PY1AnrLQQ@6`-`hD zM91|pIj=IeQ3SU6zho~jf;~=5!By3S>VEye2x2~MJ^$8uqqbIV^GKzh=iBnCoZ#4+ z@7k${IgMW6-7?L2F%EuFRy*wnbvj0sNcmXdr576=F7c8F>qO)(CSDHf%KLuU)y4?p ztO}c{UA#YoM_k`J{_wDWe{G~=l~)EyV@zK9wWZU*v1(RT7fvI3bt|uVSu|H#hS)jA zkebyj*ec{U@8a{}WcLL$`QZ-cp1dmkEUW|8Yz*C>F$w-Ag6DjrRLU)smV#+g#}rGA{8#2PkYxxz zYv=GZ=oBf5JLEz?n;olYqG%l-HY8TksAxF#WnVWP94mJ6f_R%U-t{GRK(+enT1C8* z=63PXGXTHSR&+^BgUyyfxFH>jY1=&7&k?m54igLwU-0G8tc1wwjMB0ur*>W>%&u{6 zmLtY`N7;^%MWk(JDM?ni?v9|aOUEMqkX$vvF3h$oTpS^Ji<~kv4A9jHmw8rCUa&d@ zo%Y>n54ytg>%`O;7*1wM(**PT*mqM&}%E zMoCE?k43<#hkGH}aF$G|A4l+OHqB>ZHb3BU$irLr(?vQ8`1MWPT3`-d>a;hWylYkY zZX3C(Q?JxgE?50RG0z!mD&v*wDc*3<^1bfsKp8G6n`=PvViqZ=Q>?P*vROPzw61t#==A9`5XJ9Lx)^e-P%*Y@H$C@`@!DxU!&&0opJj`-jss+r z1~GJ}c^r|Rl;DRtO2~=$9qKL@A+^QDcfvFQ2V|}>G1HNAUv@wpUAC-VU1Ay01jFqwa~xM zN;51QZ#L2KDy2)a_OgyX^0Ih))H@kB5|a?2kv8|p1d_t zL*#?QOIg-j+g7_2BiOTc4z8w(VdQR`1It53eyiRkTU~;np8JDd(qLk*@Sw|g7d2??)d)e{xYglBi!~3&&t7a!8FzQ)eSdc zv~iwqy_NCgEkGXqu|Nkz;6YtAsV z=BVd<%-V-M_pr)~C_c)sIgPh;8in*{LHe@TrqN#9=_Y%6&U?Ca9`=P>r*C z2F|YC!tH7A3nuSpFgW50beP>A)w27!qdVU0O{UAW+T~c@;MnT+u3kldaj5oO)(mq_ zYnyc+l(>F$HJg~=-zGS{#pa?B(~6QrxsJ?yV~) z=RQ==-+=v&J=d6p50^zA1=C97nH*UnU-TgMEtdX^oIGtBy0PrzzFbL5$H#iIm3wUn zGYR8zJQX#mn)I>G!Qp~k8nIwyd8bckRcf5U`}6PluHhTl*M)|TFXr@q%sD>ZX}J%c znktM!SHF_1cOG|IwGm0`>L4oDgF#w{2OyPU9pI{s+jM2M`vKl z7pmWb9nBZ3V>9cTN!L-AVHb^ybx!w4*mt_O(m9c-#0WGW*dtpag$qB|GUzUxcQX<% zpxgx@mG~p?8U%l7mN(1Tb#8YMpXbtNct->9DE2Z&F7DUgoc`Dq(OwR6qr}`j(VMkR zcy~ZWsy;8t+*`^Te4<*tM0Oe!Qz8#)xF?%W!51+84Dotf7~#wnFE|aq+7DAT#l^^o7uJp2K8v*jb~L8{vXr*-ZnAdlF;-5} zijf7G5uE32H#srmt6S!o*U4*e6MD4fwHn?0uB}m1Y@dgGzgHzi4-8v+#GSu|EI&zlW?suyL$kI1s^vCz z{4zGf*(1@X)Sc2q2TtK}!w*!gmqWP2x$eU+X9Mc8|)Y0c}QB9^d(jpY1!`TKsITt;0kNkA)kv07kVLHDw)i8IjQYO5N zi!*f;q@mMcME2N#odg-Ln@ZGC5q9EEhrap1-uVGbAQpp*Ri9&8E0SMci$!h?LC?>R zIfJ_{40lltGm#c-fn)h?(=0O7HbovzvoL`4%>a&&L>7J%LAqW3JewU~c^^@EOc0(o zw!WZ%!xk^I%JG@d2HYP1+zH#RrjbjSur@k~lga`djwC(6M ztDvS}xZt_Ci-a`R-+WtFM)kDInq)h&Qa>MXL(ubv{d9L~z|4{B4v`+@i$eDd?y7_z z<70n$?DPy6ZW3iu>b?V+#IL3YoXo4CdQLwvmy$Uc)AX1&i<_us`#0TATV8x6ZkVq?;1qt~}PUB_A=w#p$LCtu}F*hx@4@J|oN(Waz`der+Q zMLQ&~Q)~ZrT^B-~2rUcKa36K(dPHwlTtQbIM;e^D`e~~Fd9%4ol8aJ%guD;`oc^na zZe7LPZtXW+!f?lOSybNb38FHU@|(7`R2D-oF==aaf4hi5I7+L;vcgJ7%t_q>&$jNl z)VxpC$5@{3j~c_b>dgcB9*wNqh@~JnESFv-)j`{R>3C&i336ZB`teJg^pm*#BLf>) z7vD4jC3U9D6j~$Xf?Deyo|++I(p%pjV~l?5GqQtCrMx+usPy z{egw4lt4vVv#ugZrAsH8&w-ic0~fWT*@%5hQR>NXiCM{wJ9EvVD(4}@OJLAAmNA<1 zu8QQEXg@r8?=UmMjkHc-lvfIi&AT#u1@|U-d{1d7OsI(`@6+hq61dWq#I2>a0pfc) zM3Xuw^X0k|V-V3E19{(Mf0bb|OTJ&oiueJtT_yqXT^!0+cJ!|)K8&aPXdb)hae&h8 z=^IQZ=wvG8DfuK72bwtKS>J66B!@c^QGBqtpX8M>*8hY}gVj>Go@3gmFh0Dbtr)o> z?3@Q*DKZU{dX~FIXD&RVv>afmU@jluT&r0wBr3I^CjJPHl?Jo<+i0viJ{>t3638ql z6t+h?%MYcoqU>!Nyg^|f3=FR(M}qG0&eX=R*CD--{|)D--jLICYJ)VUaQi=mF2);LDNymT`HXS;&dTly8lshA=-KH!<=7qEzw)Cc=#w zL)g8%o+(EMJR9Ln_7&y2PJEwzmp_IW0l(pdOMZESNNn(1JIbvx=*(0cPL> zB*T_-MXus$vcfLR$)QEM&N$ZL{@22Xr)vF$1yf&+)2Z&o#@XKBP@?#cG*m5$*JZ>f zoL|!0lQ$#tz{If&R6%F6LiaL^We-Sh2sLB0;`-h$Y^;oP!Lt-N8ZuuGnbr%_y$^gX z#SG*RcC++)ievW)QAWN8IMEW&xow9ooFYa&kCceB>|LCA$o?FZ=wf z<`#ST(hN3hi(ni@zw7#+O0N~0u#O$O+TkjvO{g`-TO{o5y&9s@OwAztOnlA1dGDlD zaBdwJSzX#|A#9!aA+mdo{g-ebRhoQ_PkfAu?v>r-szM-lJ6m4}QF(-pBG{D9fkZ&%Y_<90_7R;mxr9GU18#sh|~x$!~}7 zPv=~z*EjVc_=l9a+**}^x%>DYHLz~kG{ML9BxU&{8zX$!0=tPI@t@-UyUE_5h3ir; zUq%@T+KdzOr8HlI0e6ciWjCF=zI>&d@avIi 
z5V>a={P@F&c!JP%4^|Sb$!B1L()J?i!*Hd_n%@r>JUw#HPA8@B$={gvdiE33Y)bhg zQTE(54SUItlCSD;M40FK`Z^&+l+8#~_Y|zVBS>!ymubEi8Z}0Zx4s?o-Cqt4LM}^w zT0C4b4n*)FCeU~VAIvC*g-M%yC%HP+o1UkV-t5~zx;IuJSqgErq%Fw|Wsb#Dmj(Wt zoLjksCNDa7;_9A+Shv+a{Af)o>i_=HiK{n*(^T4X@yc|z?%J-@5SpjJKQBAD@n2Ib za(Sw+Uf*rQD?6rDa%#4^=R6bV*oi3*zv%WF=}AOuMLNfFLI3rA`-hlr<}wVn!hwoY z8&1}_FKjV-1noOrkUTA7*?6@Tp@esw11#K`oVd-aCb$R117`28L^LD7S@cvb5*puq zMXVL(kK-fxLQGfSOC{xXJ zCVU&{9pZx%cz<8Q&ZS`0vQ@k)TmHR*eXPTfKqONlft~$sJsuEwdKxd2vxtKE#f~ zH3O8xVR)4`J_DwW_e^!VFkK^jRYKeGE8TIMRRrJCzh#HyoR>$rbGQ?FP---zuL!^t|$dw`6( zrlWZ_cD@=d*7*f`Q~3+7gjQj5e#=T+6`p|nj;t)`LiriMfh5eyMUip(IU=ixNRcqX z(#84}{eBQVF_ix(eUq+RKS^&%`>R|Nk2_XKbY$?iL}Hob4Ko|Julo3AV;T~5DDq-X znsx-?wOx1xm2Tx8C5|s`c0x}fKWqI|==|;}->k21gD)Jc(6Nf1Mw@gm>8e9W$)!Hu ze$xo10iQ$NaMRS=DDU*fCzHd4NT=|bO3r{|KL5aPCIq7!VXqsL8JJxMahP-5;mf^2 zA3vtmoFf#8&3%46yvhxuKkOReD_)4z+rAjF-IG8yUDkh49JORCKI2iBy{LHse~e>dgQX>{JqUmK!8DTSJ(F>w z>P#|{RW#+QUADEv3d%}N(nC`em&{SPK*lhB$o>(8xX8u<(ZiYl?Rb@(S}g{*NzRFe z+r3=#NysNU?j!Dx0gAm6mm4}`{Z3d3F0~BKnfyc}&wxRT=8EfE)^zMbj>)r(9r6C+ z^;4Ry$7#11u+E%&mX~G50DaAmx^w%BSu=LLNUeipV*i%(X3?2)U+VNj63S!+b{cB) zBLvo=!ITTT~Go{fg{YKPt5J6|xU?P#$lpt#KQYAr5^)-~y;t0=P#%g$4qcWXY3L zheM?~Ua)9H+ipMaCP|FLOcvSvMP#QH@jdV<>W^=9UX9_ehR4OeK|l#{KD!knqvcw6fxu#$>s z|7JIubnev-^2NTv63vO=7rvAfJ+^~59l=lUGye2MIa(qPP0k4S3D_^p%y>rEQt zY(^`i6TkM-q%y)oJNgaK78{`RF;Q5Yt8hI|?I!r7NI>9v2|af2ZqCY-r{8qgpY0hS zbH;2_zfsHUcRKZX5vHy|Tj%Z;UaRYj28x`VG;qhC&=uEI7~?CUF=kos9>_C< z5XP?cbT7m-RWKc0m@dstdYZ8I*qo=@+c6Iw)mxm~tDTNnG%!%Ag5xcY5!UR8P81pz!PaheDKovP>vFc8t7&9k zjd-J1VvbBgwS%A+H+iPw)n!)4Dx+XusI?OHtG0S-#!Y{Ec+l8%(DO`3QuybS`FX)Y zkzS3{W=qw}3byb_;@XJcSL_of_y&qJ+7$0{uv|6IK3>4IXm*$)JjnZzNVIrCGUt9Q zoOERBPr415Z>SY-m_}e7{IpLn%eye$$lyy0i9%^=aAAQU3KOZ2^ zsVaBy?RvR>f;Znr>FLmJ;MQID(U(5oP}x}kb@3%bfB@YaZbr7>BPe<(I);&lJI)eJ zts=cqzS8+Uu!8eqAs)V+7UtGUol_R)#|*AN&sKZ$Z{>%gdXgZ-G6}k06X??vT|NwY zF`n{k;UrXDFB893Ybdqma37Axc>S?HRH8Gc_odHFp)c_p4&N6^w}IGP5?TCP0{JGI zb@W=G?@q}VQ9+ueDb5@mMD#Vc5a;b5^Vf=3L$BWR2}20HX^etrj*8o=VQ;ymIIp}y zJ~`C8Fg%4*(nd~A`Xa|UFa02TamGcdi$9WhG=As8{;MXA2FcF1m`_#W+~J98G1zwE zhS06ecFPa z7-9(`rB4WU1bXu)){@gL*6aQcVU5?1A32~$mmLBeua?mxxi@Wi6``9&*qZb{-erS3 z9Rmp(ZD-I|wxYGVsPEPOar&?LHwkacaIE2cyFODUQE8dJKU-9Nzm6kbWkR}+Zoh6k zU4zgB77GKT2~4m4Dptba{W$sEMVHz6dqottV3bP0O_7GcIs145Tw=cDl3v&U-v&y78- z^{2bS&?##1$hI}>sL}`e5@S!%_UQRt_hcKae)o#tq-7vB`y?-|#U%|wDu2_yCd4Pi zm*OKc$=`8MvB!IqW-UGD`-5vx+>rm5uWG}+X;&2P$c*1ZdLBNc{Yhf7nKEN>?&ke? 
z$IR=7ug%GwKfK1cBfg~J`AmP~tqs~LSfRwX<={UKNp8vgf|n%;p0*cM9oQv9jk|aa z@AXu3g{q>oC(d$a{IcB;%SahBb|Tza$>NZ>B$Ze0R5HplOFOLFhaNQ=fewxXwt zh_n1+PF$O75{d?nbT2ncHy4tEn1gMcp6u|$1tB_Dp=>^%k%P`P58>MuBB-x+&3 zk>(A3ain0uz3*oq!0D(Q4r25rgHI$=Y&8SI^LASaF1UjqC{qQU z>R~)b#6_nUmlBro-;>KCB`U9z1NaeP=Ptw>2o%MLh?C|mp3Dgl$oO8Rlnbia*OR`1 z_K|u%LF;n*!4j8UWz!%Dgh>a6X?B{{sjtx3u%bh*#ez@1I^~UCR4d8E0}l?oo~~E-j+jC z`QTI3mMS|~Hj9@6(3O3$`AQjlrvsVC-GSz8-Uw2<{ugb~w!Q8aO?d&bc*H97|0=i5 z)#KC+MTw%rc!nzH*uGf_lcq!r9bm!~w0-skuR^yk7a%IuZsxgjt4S0)UY}heh6ogY z`)p6QAUp$Wf*-|5$trH#@g_N6v>S`?6sp~OUxFkr_NKRrxK!P!l2{Rm^Y>-|``*bRfDgG{)Y%$?R= zExM3=YpBWe z5`!5mEVIsDxR3WVuFm^B8G5%J6>}68#rl9gXUzOEt#MbulnxRZo2)~Y2#%3e>csEw zwKr?KH|An~ygi`RrpUf10|$^^1sbM!vah@dBYUYq1e)Z&*q7z^;?|0f z86n`Xg`-qi+^FZjkQo?jF)JXPtG{ zde6K5=fnQC_s4xb_j6y@@9OgU7T&@=MeZhI+}FObMKMuO#St*M#Q@);)qcKtuGi!1 zMjo#Y%oS!N48n5DaISm(LS#LDpD!BH>_qaCSSZ*?*Myb6jI0~g9I_j@2KYftY zGS`P&QKh%_$`-K7B#d4lD0=!g$_RQk@M_ODeNg5FzI%k+s9pN@=@7i6D49>(*_T;} zgWXQ-6Uk1axu<4tm)-~-GOA2S<(K@l7N>L(EYApZV(C0?UEZ!Xl@tlI?S7g+eJuSx z4HtabGL;`%JP*&L>7xG!7$H$#HM{>Sb4?_shkFLe) zVG6+C(B(D4XFI$svN!5>b8Vp-XL~J!u8-hc-;(a*_>%8zC9H_m>{PUO2g@mPmp@m^Bg?U zY8mBl<_I;yHTW&~te{b+W>aU_9cy>##r54xqC0t~AR-*9bolsgDJR*_YnY&nv+_Aeypm{>@yJB}j z`g{&Usoj0S@1L)Ta*s=`z+7$pz07G=dxn*3C*(F%%ww#Vz|a$?`1yw#vIp#z^+i`1 ztcNPa#fyi7Jj9n%t?m%`j#ONe@H)!OtDvJvgMQ{;&Yu!-R0a=t8LWZ`+(rMUq$dc& zooeBm{n*~p_xW>wZ0)9Vof)%50#IS!;7eSrwhSith+g#!H1&v0nrJm-@ENBtK6OU^ zo)H)LT_~K)U>t*)#r_Quu&Y?=yc_C^+a$-EUZ~eKUSIR$XS(QWFoy9HG_`~}Mxwq- z+KkD{n<>u8&ECt7`VI#;{Nq~QX(<~m%|I_??tbjNPihxpFmrf9)U`swE$UFL@lK8J zSB#mJ$W(`Q4PQ!dE^c(HJ?qxuKLBm}>p>9GTfy@vJ-w{GETmRzhOm<{k|*>p$6M+^ z#;D&<9Z<_%ZGfO^IK^_v%OdwI?>8Io^4D2$1BSr9^?*(3!b}6DNe7~$vu6XN?tx(4 zPp+fTO}=^04RQ(;!R2>HWbA&)vwc0**EfWdn;ZJh6m1Z`-}S_^_e9<@aitVgn?cr^ zVqJE5fH-0m@%_+b&2>>!NAg7x#>1=Dz$DZ0_xEa}V->PhqWWX5P&;$b-uZWFy7MyA zz+*_pOX?Yfm+*wS60wr-Uk0Yb66tuZ2ewUs^9o(A2emMvheRSoHIn6x)4j7%{=YRO6qWeDrF;sH*qWLIFi;x|X_rE_3f|UiW zY>k(m@-C}~x$1;r6B^V94O&)UjchEMJj+F~NE=CL?uk!o>) zZUTdm4jA$HOF0@N6t7BW7`|fU)4K|^7s>9?4D?n*ZqT9po7q(ZE2&?%=R#TG8PzP)ctBo}DDZTY`Ytp)pAq&+b3KOMA6$Bb*0=)sw1Tyln2Q z_1B$@*0+_>IS#Mz1kqFK2!vx^&|lfbKX-+ftuDP9U=9m>`Y^KP%yPt#Q9#-I^yLFCTjHsb{)Hy|s<8cofEPrEl|RFk^e?=7M<$f+qx7ZG2W`oHZ5z01%3ynA&Lkin2N zG+9+tf_Zsvt`60IH01~047rRU!E5p}pZeeTHK#lgj>3|NS$8ltW1mHCP*ty1F0F4k z%^!!{AQ;fDOIJGXx@#Az>cns3X8RbT3}EVJ7eRNH`6+ub@F7ashp(r{VK_A?0sZ_& zdc&qE14v77g^^5u_YQq_27N*g!w^QI>wr>HKwJ9SHK;sSu3;lscglvkiCm zH76&rYUX;+G*!ra<4KSYLxj}XhpJYS#}JJ!Ae_Ht_O5{fW#dU2mW`XnqRXpKC6;_oYuBU4ew#}~k! 
zit30B6EGC!RG>T0XZkvgvtZSEaRzqEDD8L|^|WK#)rBtP4fFFt1bl?!IAlN1cZhVM6r80qnuqYM1!`0MlhA$mw7uJmhzt z&6*g2hhT7z5ZSUz%WIP>>a|rRUYWZKczj6W91+S)Qz5ZiF?TDXUuRf zEY?23=|WwhV@fy|fmP1{boIi|Wm(b}yt1UIPd3mOYX1QWlff)C4dMM(koP&6HxitK zTUGW4buPb;vNAmDijOwHVcRA+1T^=DOPHQGGqu|8Y8^nF%UK?FawV`^NjW_%YzG4} zMIDg++MVse9HO56O}|(R3H3e0H=H_@7%p=b`A+fj#Ih=aFy>hDFf%P98JPYJRD~az zpgv99>7nko=V2F=Z!YjgpQ`DOeW|yA$FUnhSuNKS&yJ&u%gGtc;pX(UmV^$=8pz`1 zfgYoKmL+2A%xo{iJ4FOv`4T8??h0BAJ+}P=WDp#q z@Am?HpyNYpKPK`U$9Xj0-AxsTk%oSXWr+!x^xg;<)sQOZl)W*vZ6)$DDN=Y}d!21l zbB+TOv=dwT2LPhO9uy$8ZG+dT5dqv6d}n{Vhkf!|f~t@-YlF-~4e;!u{iKhsweWs0 zsYn|ps6@4)9Lb#BXWGacKU`9sz2XL;rYi@0DBdFW9x~&xCs=I&llc0+ZIM6Y=a4O{ zjtijpNiKUvn z>C~*ZY@SE{eFu$vm~Efm7c|$h%CmLqH`j7;bPOTr!-pL7XzjX}>D*6?~ zHM{$n{b?lu8vPr;h1A;(HNCM@s~jL64;RaN;95Q)&(YMq{vlJ zLb1-{v{jd@(ncMl%6&H<<0vgiRAMk}#8;)SRI^_Q-bfDF!XeBqUp4mOZulIq1Qhq; zgt?5o+!bA~LwuYtk0bQkp|*$tsTlB+@h+J(M|(I(8NZyW8N2Lb zqm7FnrjHf6c&`2)ZIexjDshy)Je^i9)_$Ip;M_DQ=YngO1ck=z%8m(~xH>mP`Yw<~ zBdk{+Bl~Lk=qh{Pk!pCm#RY`l7=?Jazzxa2g9{!S7gjEl{YmKDmQ;l`Tz}tSHmP+4 z3v+c5h!(Ka@~Si#(gu;&#EpIF6+>$)%iT!tWcSAyQ* zljNwPB$uKdsl|o!bag2La!38gEDMdSZzlK$ZtNzc70Nms+~;Gf4Cu> zTV9YeJzH4Rtr&Xqm0t#Jy(I)`b%qBda}Ua@A3frJbR{>c;u(F(o~DJqMel&l$WJLy z@~(ljnQ|oYeXulxb$+yTt|2MCYO%<#%hJX*rJ&x3CeJ*ZZ>4sLEo1Iot1L`|RtJ$j zJW{=IvQor`fsT-5aqao=4c_rLdQFt$c>F?0tJDiLk=hQ#5|UY;i|sAi*Icl_!87xZ z?&kjo0K594iD|u*6AwTo+OTGX)O>lPb!X(te~9KS=sxZ9n@MgZYN4~hhV+pE1&=Z7 zC0FN5p>ugva63u459is-NjQwrBh~yWX3d|0c zZEgM6&r&?}HFrIYLVfslx=pHBKTaQrb5Sh( zGp>qi8?nWn2&nBqZX8_Weist$4I{dM&SQHf`cnpX<7k&o^o^`D&$e1DxFl(unV7_0 z$94Eb!rmq{759v*H1L10f0+Fh>t8?06^Y-xT>RmMD7F8>{4amXQpO({8gm4Y1%@0q z`q$>-i~hZ9hR#Z6qZgC~BiMQiL7JYCvAkC%d&-LOKOjK`frXJ zejDhMZ0gV|^X3E60y2=lU5Fd&>*A;Rmg7r%FcnfokLofqdc*hfN+^eIqtCe=tBYnY zKYz$;ZuI7{Gy&UI*kfVsc>gla@ThUtJmFq?cRe6ObH*N$gDJfCen1HfdE=(}B}d?> zz0_H&UT66&$Ip^g6-P3<-VUb(+S3?mLDu{ii$?G|G@>f_=hWy|d(C``1f3wpI;^^7o2{ z3?!tCd3b`acFbzvdIzG8!Jb8-YY7z3zI_t;0VYe+YBjF1a0v&JqT? 
zwPkZ{z>%UI9sXwTtF9Qtns`9Mi0trkWh~WQ5{<2cirenrg1LTQ_rkPgR+V;xu&ryQhrr~W~YhwlR(h31Dv9=xwK5Z}qs6DMl#kr}Cm014%Zm+_iX zrsu&{l}$0YH0VB3DhK0Lp8A`*+_3lO(JFZ6BOkDsm46$ge*gHKBIx8qtsLgCndDQ#|dLEGfwr96b{TpnpG#qtsEG(fC zCGriEQ=o`fw%yzUVD6+uF<~lP+S$8Ic?JwV+Ciq~Y1h~O2xXgI%8|iWbEctP<4u2X{Hhj;c{i*|><|Byh=pS5;yMKkw9tKBLC0o`VlywBARB8|0vd9loJ zH|urs8R0eQ@Fke}yqIt#&yt4C_Lk#ta$T0th`9XSqB2m~Xn0<-bX{+gXS1)ne-6R1 ziH!OSNw_>PU@KqK8ug);%h85UX%5^G*?j#HkN&LthZQzryqSD0zr)_K)94KgloeLe z4Nsymk7p7ECjo-UJ<9ac?$l9BcPJEmrlvKKN2gplX$+&&_;>`MF4r0s*x9+~Vso;U=;{Ev+N~TNnYq5`^_SzUF>hRpMz)_9b}kdIihWCO7esE z`LmYK#4sGb^jXo>ooKRIG+3H#QTP#zMiPm1)S))I#JkSpIwL{@1@&zjeGVZLMCGpB z5kNc=hG>(5WUoQjDD;<*-V?MZiX9>aI+vD|Z3&CuX9nRr+l+q9oX6XqjY%16ewNhV ze0C`Tr4gP5JFQK}T6Lxzjr8Y&rrG|r(wLO(8|Y;_TI`YVb#D0A=VAOTcv2-AdfC{- z;ZPZ_xnSIPZ>)4qZ#v`MQ!X^6k7+BxpCywSeiD9HS~L1hTD(vWF~MfbCTBhLWs&n|Ntrko-~feP!Q{AJ%la%cJhZz2^Llda#1nhkPBY z7znNiC=fAF*e|8f?Kx-=R&|g5d?_%7x;Eti{41X4eJ2Hu|Af$t=V|9x_v!_`cH#2) z0m;~}72hH0O?OTnnz|!q>rpyox-y4Qu20r^bzu@kzwXO9z6jM&qPqwml${zTh(!E0 zP$*cyXh^lqJLFxg6EdT|JkXJ3PbOw!({x?Oe(j((9KXkyH1wfR&xR5}?NW4h%vsrO zbNebwXYz+o?>H3m;;f-KfdlCg!I$Zju$SJblQ}r1a*nQ_}Eilu3@_OHoGit zRfkfo>-oN(>vX3zZ;BXL4D*Y|BSv&h88|AX%AXOzTOHq&SGUMbtMpx9*pst;#?(C6 z&q7shL^E5+fjFg&qe#sy{sG__mWue!2Ikw0aPxG&j;^Az!bw?T-h8bD#TY3hXvhaE z*WFXAYQ_a@FWaGTeNno$k1zJ(P_(av>}(}SHz$QJta=20v5#z-`B#>_A3FbEtNk|P z`=BeHL$Tb?S^H`(TLTY>{-XQ~g-@s%GxonTiLj7f1wy7v{G%F$L94g124z+?vG!ik zTZ$x};TI=CZWZjEa-?jbHdPlqrJg{RM13EP%XhUx!&35Q%S}MV zKPb1PZ1#v+59}xaA&hr@SUiU@E*A<*utxCy($!uF+tl7fu(?qY9n3x)?#|qqq z7g<{_=MQygeCqw!Q=SR@_+1ezU)YwkrU18YGsE^covfsxq{H+>1tu1oRJ9!Rc~b=O z;1=BI6Um*Qw}kO>-ky;evq#8YABARcVnIn7A;~hDs|8L3J%eQ1F{mS%ErFvwSbJL& z$fOoI6=Bb)Tw9ysTPZzZW+%ppy?OI2bxNCe()(y@sj1zu`a@~_8oF>{Z=>-cFZ-gS z*OXprzwY!F%f`DJGsokt+s)XF5aIG8H9g(M?wy6b>pX!(`S_E-r~*)P#`0AHZ!U!1 zp+yW9%@K>r(rDH3P@S9I=n7k{T17R<6+B3caofA;wls}5)r*O89jl5Fa(bkh)fbd` zS$heuw}w(trTCW16g9B89VKb&h{YB+t>I}CV#3NkpH59z=YTy7%dISzp;cMx?Y%$i zJDD!~u1ZgObjyM)&p_AfNQdhijV7AS+NM}~f(jIo5WkaOOEj8y1a_to z>>)0C+QwleESqo;oMvZhHd;|^hxMvNLl`IBz*VSHV}S5ve8B?T=XDP0j@Nou;;l7zxw;Zv=i1qp}z05OZylE5a_QsGi=QPY`% z6KN`#>v>0>bG37Z$AtR$`$xYb(D3+~g@=zNda_T=5_OUOWNR{JAds*T9LNlV^d`|73s6H`#*7j(eK{{U8 zg5PVi@cHL#hi1X#u)H|u?Sb*bsB~{XEFyZ%8&3M~>)Q6T&|3|xk4kD8S<&j>k7D(7 zLN_#eRN9uu1F=Z<-)dm*(gb!>HYIT^6B9@qhZ5N@o;BuA*V25S9mK{CV@#pe@{m+A ztRDde8HP%|AMAA~#&B25Xck1skNvUzn`*#I!(A&WmzT0iJ5VXDjhhZIj6gz*DlilK zjurG$&?v&yH$Ux6a38qKy%xLKSdNpvSn`U{@nKpNut-^E){B4PUD zlGx?jP#d!=b3#jb?}}bdhnQ}Gp4nf*V>ZBpI?u?{9Tg`wd{3ZSz_>D-Q?H_YTRt*g z2;s+2uBVP5&`-d$vf;_FQSwrxZ&UcwA?PwR9o5+&zy1FrzJ>GY^V+jR-yT9|s68?| z{Dej}e8aLQxUp~gKg~)s|A5B3-I|CV^!Ed0+b0eg0y-O@)w3WwZ4O zqREkD+RI|CnNE!rRc``sYA|W5YwS_sdK^XVI?N*trC9I|SEqXBGZi^yWJ<14O^?~= z8IoLg6K4237@8W3uT~K%;VITj9Ovm7Er~dG@63&zgDC~JlT$_PJ(;i;SXZ?bX6AEV zVEB`x#qbOJ{OGEUr;OoEXBqFAvvRI@kJ{%$&5p)VNor!GQ}6HnsP~OcOrR7C4GIU) z^3&^=yUfMoUO?Kff5e5mhSR;`DMe4<4p?j2-xgQqb(pDq*V2KiEtBbH*3`k`KL|E# zfrePJm)_W&zH`JcU2mA(yz;l~{Z&D}*F{C++{{IC&J>Q+i{IqZ4}EeV;jIiYZawS2 z+*qR%i%rZ(6}O*1G$3XjHZ^26wbO1PYlg~L01D{Io*@BSuA{S8JGF{uS`{CfJR$-u zz({gvZQ-DRKAb;2URn8P&Ss2RTEJzgquN-p(8oWezZnwq3ow2+WYGVJF_m$Bw2rrr z<`wD{8{?l)1M_7; zOw0qmZDH6tlq|q^aCE|=A)4l?k00Yc7NYCH9;y_^^P<>`=jgF79bo9b<+j1Q-a(>p__54aGrsIt7qBlDQ}WfjJ4 z`ip=Oq9J$BwQmPW6}EbYgOXSroq~z+l0f&@9v?vIo>YiZZTFyz8SOvz>|H7~{lye< z&An=*PwDrT>4n7}stP!S`pbqe&wqf*^|1UDj<^fnQP(no+j7-3yQNFE60)X!ToEfU zw84{S3HErm*geXv@LB%Wo+Wya;%(KUY(f6ZD!G6U(S`GSf&2>M@n^SQoJ9}m0Yvw7mN#BPMClN?{p;olRNBOX(# z6oiupb|-b~TI++FGhq~qg3_e^!;9tUGeTC2D5P$eo+gC|7K158pAgVnEl3XY%B1&Qt*dQ{AUW0sNfuq4=!~$thZ+toFu1(&ruKn^C`xMe@?=Z3FGK6~72* 
zU{NhbzbMqAq8nvBciC#f>KM*mzb=hBK| z0!i22k%KRO4?g{vjxq-bGUL^K#5MQra9m^f0;(->V?@7E#1wwgPf&CuL+WzN?;0eVGXI9YpJdi=Gc7jr%dvua z=ejTcqMz{@=FiTtyjfp{ZaoD(c_&xfc{$+SL(HvJimlAy->u*h{BT^4JRLa4KpjrA z=+PBXdhx+)q_13(YP#IdP-z;VhfU1V7m(WFM_j2MG792n*q%SH$ggirE5;`W#)0yY zg-_5v6gANBXS!;Shnc?R%sFj0gSf{2z^?a6PoMu-@FF@r7($4UcD(gS9q%}w&wJ0` zSu<=X<5=dMG1_kH`wkp zS>+hn)@W|Xro3=4{UN{^5Ufv8K$p39t0rbOtjRaiLXCu!*%DKkfep!L4B{re27+2c zJ3H3nDuZDZF6~CZu*cE>u|3SNfF*Qco7rcu<$SJn*kIOnEM$Nc9ePwAB#vlwPXxX8 zyKmJT(vk}j3A?GwFl=4^S&p?Ap4xhF_lRBneyp|gbn5BD=An>Q5g35L)m>g@fTUu( zrCAAYCz*?{U`&@(mqj~BI`Rl}Nv!(eL9r8sqlwMM_l7(OcgebE_!J|a{#GObys?Np zW;(|6?-mODdOpQbfw*LBCf|QPjKB41d_26EKK*fH-MX+$bH958Z;?mt?Kj_

literal 0
HcmV?d00001

diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 0000000..953ecf3
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,26 @@
+Kombu Documentation
+==================================
+
+Contents:
+
+.. toctree::
+    :maxdepth: 2
+
+    introduction
+    userguide/index
+
+.. toctree::
+    :maxdepth: 1
+
+    faq
+    reference/index
+    changelog
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/docs/introduction.rst b/docs/introduction.rst
new file mode 100644
index 0000000..de4fa25
--- /dev/null
+++ b/docs/introduction.rst
@@ -0,0 +1,327 @@
+.. _kombu-index:
+
+========================================
+ kombu - Messaging library for Python
+========================================
+
+:Version: 3.0.21
+
+`Kombu` is a messaging library for Python.
+
+The aim of `Kombu` is to make messaging in Python as easy as possible by
+providing an idiomatic high-level interface for the AMQ protocol, and by
+providing proven and tested solutions to common messaging problems.
+
+`AMQP`_ is the Advanced Message Queuing Protocol, an open standard protocol
+for message orientation, queuing, routing, reliability and security,
+for which the `RabbitMQ`_ messaging server is the most popular implementation.
+
+Features
+========
+
+* Allows application authors to support several message server
+  solutions by using pluggable transports. A short sketch of this
+  follows the link list below.
+
+    * AMQP transport using the `py-amqp`_ or `librabbitmq`_ client libraries.
+
+    * High-performance AMQP transport written in C, enabled automatically
+      if `librabbitmq`_ is installed::
+
+        $ pip install librabbitmq
+
+    * Virtual transports make it easy to add support for non-AMQP
+      transports. There is already built-in support for `Redis`_,
+      `Beanstalk`_, `Amazon SQS`_, `CouchDB`_, `MongoDB`_, `ZeroMQ`_,
+      `ZooKeeper`_, `SoftLayer MQ`_ and `Pyro`_.
+
+    * You can also use the SQLAlchemy and Django ORM transports to
+      use a database as the broker.
+
+    * In-memory transport for unit testing.
+
+* Supports automatic encoding, serialization and compression of message
+  payloads.
+
+* Consistent exception handling across transports.
+
+* The ability to ensure that an operation is performed by gracefully
+  handling connection and channel errors.
+
+* Several annoyances with `amqplib`_ have been fixed, like supporting
+  timeouts and the ability to wait for events on more than one channel.
+
+* Projects already using `carrot`_ can easily be ported by using
+  a compatibility layer.
+
+For an introduction to AMQP you should read the article `Rabbits and warrens`_,
+and the `Wikipedia article about AMQP`_.
+
+.. _`RabbitMQ`: http://www.rabbitmq.com/
+.. _`AMQP`: http://amqp.org
+.. _`py-amqp`: http://pypi.python.org/pypi/amqp/
+.. _`Redis`: http://code.google.com/p/redis/
+.. _`Amazon SQS`: http://aws.amazon.com/sqs/
+.. _`MongoDB`: http://www.mongodb.org/
+.. _`CouchDB`: http://couchdb.apache.org/
+.. _`ZeroMQ`: http://zeromq.org/
+.. _`Zookeeper`: https://zookeeper.apache.org/
+.. _`Beanstalk`: http://kr.github.com/beanstalkd/
+.. _`Rabbits and warrens`: http://blogs.digitar.com/jjww/2009/01/rabbits-and-warrens/
+.. _`amqplib`: http://barryp.org/software/py-amqplib/
+.. _`Wikipedia article about AMQP`: http://en.wikipedia.org/wiki/AMQP
+.. _`carrot`: http://pypi.python.org/pypi/carrot/
+.. _`librabbitmq`: http://pypi.python.org/pypi/librabbitmq
+.. _`Pyro`: http://pythonhosting.org/Pyro
+.. _`SoftLayer MQ`: http://www.softlayer.com/services/additional/message-queue
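+
+As a quick sketch of the pluggable-transport idea (illustrative only:
+the broker URLs are placeholders for brokers you actually run, and the
+queue name is made up)::
+
+    from kombu import Connection
+
+    for url in ['amqp://guest:guest@localhost:5672//',
+                'redis://localhost:6379/0',
+                'memory://']:
+        # The transport is picked from the URL scheme; the rest of the
+        # code stays the same across brokers.
+        with Connection(url) as conn:
+            queue = conn.SimpleQueue('sketch_queue')
+            queue.put({'hello': 'world'})
+            message = queue.get(block=True, timeout=1)
+            print(message.payload)
+            message.ack()
+            queue.close()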
+
+.. _transport-comparison:
+
+Transport Comparison
+====================
+
++---------------+----------+------------+------------+---------------+
+| **Client**    | **Type** | **Direct** | **Topic**  | **Fanout**    |
++---------------+----------+------------+------------+---------------+
+| *amqp*        | Native   | Yes        | Yes        | Yes           |
++---------------+----------+------------+------------+---------------+
+| *redis*       | Virtual  | Yes        | Yes        | Yes (PUB/SUB) |
++---------------+----------+------------+------------+---------------+
+| *mongodb*     | Virtual  | Yes        | Yes        | Yes           |
++---------------+----------+------------+------------+---------------+
+| *beanstalk*   | Virtual  | Yes        | Yes [#f1]_ | No            |
++---------------+----------+------------+------------+---------------+
+| *SQS*         | Virtual  | Yes        | Yes [#f1]_ | Yes [#f2]_    |
++---------------+----------+------------+------------+---------------+
+| *couchdb*     | Virtual  | Yes        | Yes [#f1]_ | No            |
++---------------+----------+------------+------------+---------------+
+| *zookeeper*   | Virtual  | Yes        | Yes [#f1]_ | No            |
++---------------+----------+------------+------------+---------------+
+| *in-memory*   | Virtual  | Yes        | Yes [#f1]_ | No            |
++---------------+----------+------------+------------+---------------+
+| *django*      | Virtual  | Yes        | Yes [#f1]_ | No            |
++---------------+----------+------------+------------+---------------+
+| *sqlalchemy*  | Virtual  | Yes        | Yes [#f1]_ | No            |
++---------------+----------+------------+------------+---------------+
+| *SLMQ*        | Virtual  | Yes        | Yes [#f1]_ | No            |
++---------------+----------+------------+------------+---------------+
+
+
+.. [#f1] Declarations only kept in memory, so exchanges/queues
+    must be declared by all clients that need them.
+
+.. [#f2] Fanout supported via storing routing tables in SimpleDB.
+    Disabled by default, but can be enabled by using the
+    ``supports_fanout`` transport option.
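+
+As a sketch of the ``supports_fanout`` option mentioned in the footnote
+above (the AWS credentials in the URL are placeholders)::
+
+    from kombu import Connection
+
+    # Fanout over SQS stores routing tables in SimpleDB, so it is
+    # disabled by default and must be turned on explicitly.
+    connection = Connection('sqs://AWS_KEY_ID:AWS_SECRET@',
+                            transport_options={'supports_fanout': True})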
+
+Documentation
+-------------
+
+Kombu uses Sphinx, and the latest documentation can be found here:
+
+    http://kombu.readthedocs.org/
+
+Quick overview
+--------------
+
+::
+
+    from kombu import Connection, Exchange, Queue
+
+    media_exchange = Exchange('media', 'direct', durable=True)
+    video_queue = Queue('video', exchange=media_exchange, routing_key='video')
+
+    def process_media(body, message):
+        print(body)
+        message.ack()
+
+    # connections
+    with Connection('amqp://guest:guest@localhost//') as conn:
+
+        # produce
+        producer = conn.Producer(serializer='json')
+        producer.publish({'name': '/tmp/lolcat1.avi', 'size': 1301013},
+                         exchange=media_exchange, routing_key='video',
+                         declare=[video_queue])
+
+        # The declare above makes sure the video queue is declared so
+        # that the messages can be delivered.  It is a best practice in
+        # Kombu to have both publishers and consumers declare the queue.
+        # You can also declare the queue manually using:
+        #     video_queue(conn).declare()
+
+        # consume
+        with conn.Consumer(video_queue, callbacks=[process_media]) as consumer:
+            # Process messages and handle events on all channels
+            while True:
+                conn.drain_events()
+
+        # Consume from several queues on the same channel:
+        video_queue = Queue('video', exchange=media_exchange,
+                            routing_key='video')
+        image_queue = Queue('image', exchange=media_exchange,
+                            routing_key='image')
+
+        with conn.Consumer([video_queue, image_queue],
+                           callbacks=[process_media]) as consumer:
+            while True:
+                conn.drain_events()
+
+
+Or handle channels manually::
+
+    with connection.channel() as channel:
+        producer = Producer(channel, ...)
+        consumer = Consumer(channel)
+
+
+All objects can be used outside of with statements too,
+just remember to close the objects after use::
+
+    from kombu import Connection, Consumer, Producer
+
+    connection = Connection()
+    # ...
+    connection.release()
+
+    consumer = Consumer(channel_or_connection, ...)
+    consumer.register_callback(my_callback)
+    consumer.consume()
+    # ...
+    consumer.cancel()
+
+
+`Exchange` and `Queue` are simply declarations that can be pickled
+and used in configuration files etc.
+
+They also support operations, but to do so they need to be bound
+to a channel.
+
+Binding exchanges and queues to a connection will make them use
+that connection's default channel.
+
+::
+
+    >>> exchange = Exchange('tasks', 'direct')
+
+    >>> connection = Connection()
+    >>> bound_exchange = exchange(connection)
+    >>> bound_exchange.delete()
+
+    # the original exchange is not affected, and stays unbound.
+    >>> exchange.delete()
+    NotBoundError: Can't call delete on Exchange not bound to a channel.
+
+Installation
+============
+
+You can install `Kombu` either via the Python Package Index (PyPI)
+or from source.
+
+To install using `pip`::
+
+    $ pip install kombu
+
+To install using `easy_install`::
+
+    $ easy_install kombu
+
+If you have downloaded a source tarball you can install it
+by doing the following::
+
+    $ python setup.py build
+    # python setup.py install # as root
+
+
+Terminology
+===========
+
+There are some concepts you should be familiar with before starting:
+
+    * Producers
+
+      Producers send messages to an exchange.
+
+    * Exchanges
+
+      Messages are sent to exchanges. Exchanges are named and can be
+      configured to use one of several routing algorithms. The exchange
+      routes the messages to consumers by matching the routing key in the
+      message with the routing key the consumer provides when binding to
+      the exchange.
+
+    * Consumers
+
+      Consumers declare a queue, bind it to an exchange and receive
+      messages from it.
+
+    * Queues
+
+      Queues receive messages sent to exchanges. The queues are declared
+      by consumers.
+
+    * Routing keys
+
+      Every message has a routing key.  The interpretation of the routing
+      key depends on the exchange type. There are four default exchange
+      types defined by the AMQP standard, and vendors can define custom
+      types (see your vendor's manual for details).
+
+      These are the default exchange types defined by AMQP/0.8:
+
+        * Direct exchange
+
+          Matches if the routing key property of the message and
+          the `routing_key` attribute of the consumer are identical.
+
+        * Fan-out exchange
+
+          Always matches, even if the binding does not have a routing
+          key.
+
+        * Topic exchange
+
+          Matches the routing key property of the message by a primitive
+          pattern matching scheme. The message routing key then consists
+          of words separated by dots (`"."`, like domain names), and
+          two special characters are available: star (`"*"`) and hash
+          (`"#"`). The star matches any word, and the hash matches
+          zero or more words. For example `"*.stock.#"` matches the
+          routing keys `"usd.stock"` and `"eur.stock.db"` but not
+          `"stock.nasdaq"`. A minimal sketch follows this list.
+
+        * Headers exchange
+
+          Matches on the message's header attributes instead of the
+          routing key.
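+
+For illustration, a minimal sketch of the topic pattern above (the
+exchange, queue, and routing keys are made up for this example)::
+
+    from kombu import Connection, Exchange, Queue
+
+    stocks = Exchange('stocks', type='topic')
+    # '*.stock.#' matches 'usd.stock' and 'eur.stock.db',
+    # but not 'stock.nasdaq'.
+    all_stocks = Queue('all_stocks', exchange=stocks,
+                       routing_key='*.stock.#')
+
+    with Connection('amqp://guest:guest@localhost//') as conn:
+        producer = conn.Producer(serializer='json')
+        producer.publish({'symbol': 'ACME', 'price': 3.14},
+                         exchange=stocks, routing_key='usd.stock',
+                         declare=[all_stocks])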
+
+Getting Help
+============
+
+Mailing list
+------------
+
+Join the `carrot-users`_ mailing list.
+
+.. _`carrot-users`: http://groups.google.com/group/carrot-users/
+
+Bug tracker
+===========
+
+If you have any suggestions, bug reports or annoyances, please report them
+to our issue tracker at http://github.com/celery/kombu/issues/
+
+Contributing
+============
+
+Development of `Kombu` happens at Github: http://github.com/celery/kombu
+
+You are highly encouraged to participate in the development. If you don't
+like Github (for some reason) you're welcome to send regular patches.
+
+License
+=======
+
+This software is licensed under the `New BSD License`. See the `LICENSE`
+file in the top distribution directory for the full license text.
+
+.. image:: https://d2weczhvl823v0.cloudfront.net/celery/kombu/trend.png
+    :alt: Bitdeli badge
+    :target: https://bitdeli.com/free
diff --git a/docs/reference/index.rst b/docs/reference/index.rst
new file mode 100644
index 0000000..cae7b8c
--- /dev/null
+++ b/docs/reference/index.rst
@@ -0,0 +1,67 @@
+===========================
+ API Reference
+===========================
+
+:Release: |version|
+:Date: |today|
+
+.. toctree::
+    :maxdepth: 2
+
+    kombu
+    kombu.common
+    kombu.mixins
+    kombu.simple
+    kombu.clocks
+    kombu.compat
+    kombu.pidbox
+    kombu.exceptions
+    kombu.log
+    kombu.connection
+    kombu.message
+    kombu.compression
+    kombu.pools
+    kombu.abstract
+    kombu.syn
+    kombu.async
+    kombu.async.hub
+    kombu.async.semaphore
+    kombu.async.timer
+    kombu.async.debug
+    kombu.transport
+    kombu.transport.pyamqp
+    kombu.transport.librabbitmq
+    kombu.transport.memory
+    kombu.transport.redis
+    kombu.transport.zmq
+    kombu.transport.beanstalk
+    kombu.transport.mongodb
+    kombu.transport.couchdb
+    kombu.transport.zookeeper
+    kombu.transport.filesystem
+    kombu.transport.django
+    kombu.transport.django.models
+    kombu.transport.django.managers
+    kombu.transport.django.management.commands.clean_kombu_messages
+    kombu.transport.sqlalchemy
+    kombu.transport.sqlalchemy.models
+    kombu.transport.SQS
+    kombu.transport.SLMQ
+    kombu.transport.pyro
+    kombu.transport.amqplib
+    kombu.transport.base
+    kombu.transport.virtual
+    kombu.transport.virtual.exchange
+    kombu.transport.virtual.scheduling
+    kombu.serialization
+    kombu.utils
+    kombu.utils.eventio
+    kombu.utils.limits
+    kombu.utils.compat
+    kombu.utils.debug
+    kombu.utils.encoding
+    kombu.utils.functional
+    kombu.utils.url
+    kombu.utils.text
+    kombu.utils.amq_manager
+    kombu.five
diff --git a/docs/reference/kombu.abstract.rst b/docs/reference/kombu.abstract.rst
new file mode 100644
index 0000000..0669a5a
--- /dev/null
+++ b/docs/reference/kombu.abstract.rst
@@ -0,0 +1,10 @@
+.. currentmodule:: kombu.abstract
+
+.. automodule:: kombu.abstract
+
+    .. contents::
+        :local:
+
+    ..
autoclass:: MaybeChannelBound + :members: + :undoc-members: diff --git a/docs/reference/kombu.async.debug.rst b/docs/reference/kombu.async.debug.rst new file mode 100644 index 0000000..508333a --- /dev/null +++ b/docs/reference/kombu.async.debug.rst @@ -0,0 +1,11 @@ +========================================================== + Debugging Utils - kombu.async.debug +========================================================== + +.. contents:: + :local: +.. currentmodule:: kombu.async.debug + +.. automodule:: kombu.async.debug + :members: + :undoc-members: diff --git a/docs/reference/kombu.async.hub.rst b/docs/reference/kombu.async.hub.rst new file mode 100644 index 0000000..a1ee144 --- /dev/null +++ b/docs/reference/kombu.async.hub.rst @@ -0,0 +1,11 @@ +========================================================== + Event Loop Implementation - kombu.async.hub +========================================================== + +.. contents:: + :local: +.. currentmodule:: kombu.async.hub + +.. automodule:: kombu.async.hub + :members: + :undoc-members: diff --git a/docs/reference/kombu.async.rst b/docs/reference/kombu.async.rst new file mode 100644 index 0000000..3f575f0 --- /dev/null +++ b/docs/reference/kombu.async.rst @@ -0,0 +1,11 @@ +========================================================== + Event Loop - kombu.async +========================================================== + +.. contents:: + :local: +.. currentmodule:: kombu.async + +.. automodule:: kombu.async + :members: + :undoc-members: diff --git a/docs/reference/kombu.async.semaphore.rst b/docs/reference/kombu.async.semaphore.rst new file mode 100644 index 0000000..6143623 --- /dev/null +++ b/docs/reference/kombu.async.semaphore.rst @@ -0,0 +1,11 @@ +========================================================== + Semaphores - kombu.async.semaphore +========================================================== + +.. contents:: + :local: +.. currentmodule:: kombu.async.semaphore + +.. automodule:: kombu.async.semaphore + :members: + :undoc-members: diff --git a/docs/reference/kombu.async.timer.rst b/docs/reference/kombu.async.timer.rst new file mode 100644 index 0000000..5edeca5 --- /dev/null +++ b/docs/reference/kombu.async.timer.rst @@ -0,0 +1,11 @@ +========================================================== + Timer - kombu.async.timer +========================================================== + +.. contents:: + :local: +.. currentmodule:: kombu.async.timer + +.. automodule:: kombu.async.timer + :members: + :undoc-members: diff --git a/docs/reference/kombu.clocks.rst b/docs/reference/kombu.clocks.rst new file mode 100644 index 0000000..9f49cff --- /dev/null +++ b/docs/reference/kombu.clocks.rst @@ -0,0 +1,11 @@ +========================================================== + Clocks and Synchronization - kombu.clocks +========================================================== + +.. contents:: + :local: +.. currentmodule:: kombu.clocks + +.. automodule:: kombu.clocks + :members: + :undoc-members: diff --git a/docs/reference/kombu.common.rst b/docs/reference/kombu.common.rst new file mode 100644 index 0000000..01eee9b --- /dev/null +++ b/docs/reference/kombu.common.rst @@ -0,0 +1,11 @@ +========================================================== + Common Utilities - kombu.common +========================================================== + +.. contents:: + :local: +.. currentmodule:: kombu.common + +.. 
automodule:: kombu.common
+    :members:
+    :undoc-members:
diff --git a/docs/reference/kombu.compat.rst b/docs/reference/kombu.compat.rst
new file mode 100644
index 0000000..3201a84
--- /dev/null
+++ b/docs/reference/kombu.compat.rst
@@ -0,0 +1,36 @@
+.. currentmodule:: kombu.compat
+
+.. automodule:: kombu.compat
+
+    .. contents::
+        :local:
+
+    Publisher
+    ---------
+
+    Replace with :class:`kombu.Producer`.
+
+    .. autoclass:: Publisher
+        :members:
+        :undoc-members:
+        :inherited-members:
+
+    Consumer
+    --------
+
+    Replace with :class:`kombu.Consumer`.
+
+    .. autoclass:: Consumer
+        :members:
+        :undoc-members:
+        :inherited-members:
+
+    ConsumerSet
+    -----------
+
+    Replace with :class:`kombu.Consumer`.
+
+    .. autoclass:: ConsumerSet
+        :members:
+        :undoc-members:
+        :inherited-members:
diff --git a/docs/reference/kombu.compression.rst b/docs/reference/kombu.compression.rst
new file mode 100644
index 0000000..c774857
--- /dev/null
+++ b/docs/reference/kombu.compression.rst
@@ -0,0 +1,20 @@
+.. currentmodule:: kombu.compression
+
+.. automodule:: kombu.compression
+
+    .. contents::
+        :local:
+
+    Encoding/decoding
+    -----------------
+
+    .. autofunction:: compress
+    .. autofunction:: decompress
+
+    Registry
+    --------
+
+    .. autofunction:: encoders
+    .. autofunction:: get_encoder
+    .. autofunction:: get_decoder
+    .. autofunction:: register
diff --git a/docs/reference/kombu.connection.rst b/docs/reference/kombu.connection.rst
new file mode 100644
index 0000000..63870dd
--- /dev/null
+++ b/docs/reference/kombu.connection.rst
@@ -0,0 +1,40 @@
+
+
+.. currentmodule:: kombu.connection
+
+.. automodule:: kombu.connection
+
+    .. contents::
+        :local:
+
+    Connection
+    ----------
+
+    .. autoclass:: Connection
+        :members:
+        :undoc-members:
+
+    Pools
+    -----
+
+    .. seealso::
+
+        The shortcut methods :meth:`Connection.Pool` and
+        :meth:`Connection.ChannelPool` are the recommended way
+        to instantiate these classes; a short usage sketch follows
+        below.
+
+    .. autoclass:: ConnectionPool
+
+        .. autoattribute:: LimitExceeded
+
+        .. automethod:: acquire
+        .. automethod:: release
+        .. automethod:: force_close_all
+
+    .. autoclass:: ChannelPool
+
+        .. autoattribute:: LimitExceeded
+
+        .. automethod:: acquire
+        .. automethod:: release
+        .. automethod:: force_close_all
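+
+    As an illustrative sketch of the pool API (the connection URL and
+    pool limit here are arbitrary choices)::
+
+        from kombu import Connection
+
+        connection = Connection('amqp://guest:guest@localhost//')
+        pool = connection.Pool(limit=4)    # shortcut for ConnectionPool
+
+        # block=True waits for a free resource instead of raising
+        # LimitExceeded when the pool is exhausted.
+        conn = pool.acquire(block=True)
+        try:
+            conn.connect()
+        finally:
+            conn.release()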
diff --git a/docs/reference/kombu.exceptions.rst b/docs/reference/kombu.exceptions.rst
new file mode 100644
index 0000000..5a11bf9
--- /dev/null
+++ b/docs/reference/kombu.exceptions.rst
@@ -0,0 +1,14 @@
+.. currentmodule:: kombu.exceptions
+
+.. automodule:: kombu.exceptions
+
+    .. contents::
+        :local:
+
+    .. autoexception:: NotBoundError
+    .. autoexception:: MessageStateError
+    .. autoexception:: TimeoutError
+    .. autoexception:: LimitExceeded
+    .. autoexception:: ConnectionLimitExceeded
+    .. autoexception:: ChannelLimitExceeded
+
diff --git a/docs/reference/kombu.five.rst b/docs/reference/kombu.five.rst
new file mode 100644
index 0000000..3205cbe
--- /dev/null
+++ b/docs/reference/kombu.five.rst
@@ -0,0 +1,11 @@
+==========================================================
+ Python2 to Python3 utilities - kombu.five
+==========================================================
+
+.. contents::
+    :local:
+.. currentmodule:: kombu.five
+
+.. automodule:: kombu.five
+    :members:
+    :undoc-members:
diff --git a/docs/reference/kombu.log.rst b/docs/reference/kombu.log.rst
new file mode 100644
index 0000000..3986376
--- /dev/null
+++ b/docs/reference/kombu.log.rst
@@ -0,0 +1,11 @@
+==========================================================
+ Logging - kombu.log
+==========================================================
+
+.. contents::
+    :local:
+.. currentmodule:: kombu.log
+
+.. automodule:: kombu.log
+    :members:
+    :undoc-members:
diff --git a/docs/reference/kombu.message.rst b/docs/reference/kombu.message.rst
new file mode 100644
index 0000000..b644ec0
--- /dev/null
+++ b/docs/reference/kombu.message.rst
@@ -0,0 +1,11 @@
+==========================================================
+ Message Objects - kombu.message
+==========================================================
+
+.. contents::
+    :local:
+.. currentmodule:: kombu.message
+
+.. automodule:: kombu.message
+    :members:
+    :undoc-members:
diff --git a/docs/reference/kombu.mixins.rst b/docs/reference/kombu.mixins.rst
new file mode 100644
index 0000000..7a87ac2
--- /dev/null
+++ b/docs/reference/kombu.mixins.rst
@@ -0,0 +1,11 @@
+==========================================================
+ Mixin Classes - kombu.mixins
+==========================================================
+
+.. contents::
+    :local:
+.. currentmodule:: kombu.mixins
+
+.. automodule:: kombu.mixins
+    :members:
+    :undoc-members:
diff --git a/docs/reference/kombu.pidbox.rst b/docs/reference/kombu.pidbox.rst
new file mode 100644
index 0000000..ebc72cd
--- /dev/null
+++ b/docs/reference/kombu.pidbox.rst
@@ -0,0 +1,89 @@
+.. currentmodule:: kombu.pidbox
+
+.. automodule:: kombu.pidbox
+
+    .. contents::
+        :local:
+
+    Introduction
+    ------------
+
+    Creating the application's Mailbox
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    .. code-block:: python
+
+        >>> mailbox = pidbox.Mailbox("celerybeat", type="direct")
+
+        >>> @mailbox.handler
+        ... def reload_schedule(state, **kwargs):
+        ...     state["beat"].reload_schedule()
+
+        >>> @mailbox.handler
+        ... def connection_info(state, **kwargs):
+        ...     return {"connection": state["connection"].info()}
+
+    Example Node
+    ~~~~~~~~~~~~
+
+    .. code-block:: python
+
+        >>> connection = kombu.Connection()
+        >>> state = {"beat": beat,
+        ...          "connection": connection}
+        >>> consumer = mailbox(connection).Node(hostname).listen()
+        >>> try:
+        ...     while True:
+        ...         connection.drain_events(timeout=1)
+        ... finally:
+        ...     consumer.cancel()
+
+    Example Client
+    ~~~~~~~~~~~~~~
+
+    .. code-block:: python
+
+        >>> mailbox.cast("reload_schedule")   # cast is async.
+        >>> info = mailbox.call("connection_info", timeout=1)
+
+    Mailbox
+    -------
+
+    .. autoclass:: Mailbox
+
+        .. autoattribute:: namespace
+        .. autoattribute:: connection
+        .. autoattribute:: type
+        .. autoattribute:: exchange
+        .. autoattribute:: reply_exchange
+
+        .. automethod:: Node
+        .. automethod:: call
+        .. automethod:: cast
+        .. automethod:: abcast
+        .. automethod:: multi_call
+        .. automethod:: get_reply_queue
+        .. automethod:: get_queue
+
+    Node
+    ----
+
+    .. autoclass:: Node
+
+        .. autoattribute:: hostname
+        .. autoattribute:: mailbox
+        .. autoattribute:: handlers
+        .. autoattribute:: state
+        .. autoattribute:: channel
+
+        .. automethod:: Consumer
+        .. automethod:: handler
+        .. automethod:: listen
+        .. automethod:: dispatch
+        .. automethod:: dispatch_from_message
+        .. automethod:: handle_call
+        .. automethod:: handle_cast
+        .. automethod:: handle
+        .. automethod:: handle_message
+    ..
automethod:: reply + diff --git a/docs/reference/kombu.pools.rst b/docs/reference/kombu.pools.rst new file mode 100644 index 0000000..b1c210f --- /dev/null +++ b/docs/reference/kombu.pools.rst @@ -0,0 +1,11 @@ +========================================================== + General Pools - kombu.pools +========================================================== + +.. contents:: + :local: +.. currentmodule:: kombu.pools + +.. automodule:: kombu.pools + :members: + :undoc-members: diff --git a/docs/reference/kombu.rst b/docs/reference/kombu.rst new file mode 100644 index 0000000..e970aaf --- /dev/null +++ b/docs/reference/kombu.rst @@ -0,0 +1,187 @@ +.. currentmodule:: kombu + +.. contents:: + :local: + +.. automodule:: kombu + + .. autofunction:: enable_insecure_serializers + + .. autofunction:: disable_insecure_serializers + + Connection + ---------- + + .. autoclass:: Connection + + .. admonition:: Attributes + + .. autoattribute:: hostname + .. autoattribute:: port + .. autoattribute:: userid + .. autoattribute:: password + .. autoattribute:: virtual_host + .. autoattribute:: ssl + .. autoattribute:: login_method + .. autoattribute:: failover_strategy + .. autoattribute:: connect_timeout + .. autoattribute:: heartbeat + + .. autoattribute:: default_channel + .. autoattribute:: connected + .. autoattribute:: recoverable_connection_errors + .. autoattribute:: recoverable_channel_errors + .. autoattribute:: connection_errors + .. autoattribute:: channel_errors + .. autoattribute:: transport + .. autoattribute:: connection + .. autoattribute:: uri_prefix + .. autoattribute:: declared_entities + .. autoattribute:: cycle + .. autoattribute:: host + .. autoattribute:: manager + .. autoattribute:: supports_heartbeats + .. autoattribute:: is_evented + + .. admonition:: Methods + + .. automethod:: as_uri + .. automethod:: connect + .. automethod:: channel + .. automethod:: drain_events + .. automethod:: release + .. automethod:: autoretry + .. automethod:: ensure_connection + .. automethod:: ensure + .. automethod:: revive + .. automethod:: create_transport + .. automethod:: get_transport_cls + .. automethod:: clone + .. automethod:: info + .. automethod:: switch + .. automethod:: maybe_switch_next + .. automethod:: heartbeat_check + .. automethod:: maybe_close_channel + .. automethod:: register_with_event_loop + .. automethod:: close + .. automethod:: _close + .. automethod:: completes_cycle + .. automethod:: get_manager + + .. automethod:: Producer + .. automethod:: Consumer + .. automethod:: Pool + .. automethod:: ChannelPool + .. automethod:: SimpleQueue + .. automethod:: SimpleBuffer + + Exchange + -------- + + Example creating an exchange declaration:: + + >>> news_exchange = Exchange('news', type='topic') + + For now `news_exchange` is just a declaration, you can't perform + actions on it. It just describes the name and options for the exchange. + + The exchange can be bound or unbound. Bound means the exchange is + associated with a channel and operations can be performed on it. + To bind the exchange you call the exchange with the channel as argument:: + + >>> bound_exchange = news_exchange(channel) + + Now you can perform operations like :meth:`declare` or :meth:`delete`:: + + >>> bound_exchange.declare() + >>> message = bound_exchange.Message('Cure for cancer found!') + >>> bound_exchange.publish(message, routing_key='news.science') + >>> bound_exchange.delete() + + .. autoclass:: Exchange + :members: + :undoc-members: + + .. 
automethod:: maybe_bind + + Queue + ----- + + Example creating a queue using our exchange in the :class:`Exchange` + example:: + + >>> science_news = Queue('science_news', + ... exchange=news_exchange, + ... routing_key='news.science') + + For now `science_news` is just a declaration, you can't perform + actions on it. It just describes the name and options for the queue. + + The queue can be bound or unbound. Bound means the queue is + associated with a channel and operations can be performed on it. + To bind the queue you call the queue instance with the channel as + an argument:: + + >>> bound_science_news = science_news(channel) + + Now you can perform operations like :meth:`declare` or :meth:`purge`: + + .. code-block:: python + + >>> bound_science_news.declare() + >>> bound_science_news.purge() + >>> bound_science_news.delete() + + .. autoclass:: Queue + :members: + :undoc-members: + + .. automethod:: maybe_bind + + Message Producer + ---------------- + + .. autoclass:: Producer + + .. autoattribute:: channel + .. autoattribute:: exchange + .. autoattribute:: routing_key + .. autoattribute:: serializer + .. autoattribute:: compression + .. autoattribute:: auto_declare + .. autoattribute:: on_return + .. autoattribute:: connection + + .. automethod:: declare + .. automethod:: maybe_declare + .. automethod:: publish + .. automethod:: revive + + Message Consumer + ---------------- + + .. autoclass:: Consumer + + .. autoattribute:: channel + .. autoattribute:: queues + .. autoattribute:: no_ack + .. autoattribute:: auto_declare + .. autoattribute:: callbacks + .. autoattribute:: on_message + .. autoattribute:: on_decode_error + .. autoattribute:: connection + + .. automethod:: declare + .. automethod:: register_callback + .. automethod:: add_queue + .. automethod:: add_queue_from_dict + .. automethod:: consume + .. automethod:: cancel + .. automethod:: cancel_by_queue + .. automethod:: consuming_from + .. automethod:: purge + .. automethod:: flow + .. automethod:: qos + .. automethod:: recover + .. automethod:: receive + .. automethod:: revive diff --git a/docs/reference/kombu.serialization.rst b/docs/reference/kombu.serialization.rst new file mode 100644 index 0000000..7ed56ff --- /dev/null +++ b/docs/reference/kombu.serialization.rst @@ -0,0 +1,47 @@ +.. currentmodule:: kombu.serialization + +.. automodule:: kombu.serialization + + .. contents:: + :local: + + Overview + -------- + + Centralized support for encoding/decoding of data structures. + Contains json, pickle, msgpack, and yaml serializers. + + Optionally installs support for YAML if the `PyYAML`_ package + is installed. + + Optionally installs support for `msgpack`_ if the `msgpack-python`_ + package is installed. + + + Exceptions + ---------- + + .. autoexception:: SerializerNotInstalled + + Serialization + ------------- + + .. autofunction:: encode + + .. autofunction:: decode + + .. autofunction:: raw_encode + + Registry + -------- + + .. autofunction:: register + + .. autodata:: registry + +.. _`cjson`: http://pypi.python.org/pypi/python-cjson/ +.. _`simplejson`: http://code.google.com/p/simplejson/ +.. _`Python 2.6+`: http://docs.python.org/library/json.html +.. _`PyYAML`: http://pyyaml.org/ +.. _`msgpack`: http://msgpack.sourceforge.net/ +.. _`msgpack-python`: http://pypi.python.org/pypi/msgpack-python/ diff --git a/docs/reference/kombu.simple.rst b/docs/reference/kombu.simple.rst new file mode 100644 index 0000000..b43e2ec --- /dev/null +++ b/docs/reference/kombu.simple.rst @@ -0,0 +1,89 @@ +.. 
currentmodule:: kombu.simple + +.. automodule:: kombu.simple + + .. contents:: + :local: + + Persistent + ---------- + + .. autoclass:: SimpleQueue + + .. attribute:: channel + + Current channel + + .. attribute:: producer + + :class:`~kombu.Producer` used to publish messages. + + .. attribute:: consumer + + :class:`~kombu.Consumer` used to receive messages. + + .. attribute:: no_ack + + flag to enable/disable acknowledgements. + + .. attribute:: queue + + :class:`~kombu.Queue` to consume from (if consuming). + + .. attribute:: queue_opts + + Additional options for the queue declaration. + + .. attribute:: exchange_opts + + Additional options for the exchange declaration. + + .. automethod:: get + .. automethod:: get_nowait + .. automethod:: put + .. automethod:: clear + .. automethod:: __len__ + .. automethod:: qsize + .. automethod:: close + + Buffer + ------ + + .. autoclass:: SimpleBuffer + + .. attribute:: channel + + Current channel + + .. attribute:: producer + + :class:`~kombu.Producer` used to publish messages. + + .. attribute:: consumer + + :class:`~kombu.Consumer` used to receive messages. + + .. attribute:: no_ack + + flag to enable/disable acknowledgements. + + .. attribute:: queue + + :class:`~kombu.Queue` to consume from (if consuming). + + .. attribute:: queue_opts + + Additional options for the queue declaration. + + .. attribute:: exchange_opts + + Additional options for the exchange declaration. + + .. automethod:: get + .. automethod:: get_nowait + .. automethod:: put + .. automethod:: clear + .. automethod:: __len__ + .. automethod:: qsize + .. automethod:: close + diff --git a/docs/reference/kombu.syn.rst b/docs/reference/kombu.syn.rst new file mode 100644 index 0000000..f5c650b --- /dev/null +++ b/docs/reference/kombu.syn.rst @@ -0,0 +1,11 @@ +========================================================== + Async Utilities - kombu.syn +========================================================== + +.. contents:: + :local: +.. currentmodule:: kombu.syn + +.. automodule:: kombu.syn + + .. autofunction:: detect_environment diff --git a/docs/reference/kombu.transport.SLMQ.rst b/docs/reference/kombu.transport.SLMQ.rst new file mode 100644 index 0000000..b8c7a83 --- /dev/null +++ b/docs/reference/kombu.transport.SLMQ.rst @@ -0,0 +1,24 @@ +====================================== +kombu.transport.SLMQ +====================================== + +.. currentmodule:: kombu.transport.SLMQ + +.. automodule:: kombu.transport.SLMQ + + .. contents:: + :local: + + Transport + --------- + + .. autoclass:: Transport + :members: + :undoc-members: + + Channel + ------- + + .. autoclass:: Channel + :members: + :undoc-members: diff --git a/docs/reference/kombu.transport.SQS.rst b/docs/reference/kombu.transport.SQS.rst new file mode 100644 index 0000000..97a5df6 --- /dev/null +++ b/docs/reference/kombu.transport.SQS.rst @@ -0,0 +1,20 @@ +.. currentmodule:: kombu.transport.SQS + +.. automodule:: kombu.transport.SQS + + .. contents:: + :local: + + Transport + --------- + + .. autoclass:: Transport + :members: + :undoc-members: + + Channel + ------- + + .. autoclass:: Channel + :members: + :undoc-members: diff --git a/docs/reference/kombu.transport.amqplib.rst b/docs/reference/kombu.transport.amqplib.rst new file mode 100644 index 0000000..06fc48b --- /dev/null +++ b/docs/reference/kombu.transport.amqplib.rst @@ -0,0 +1,36 @@ +.. currentmodule:: kombu.transport.amqplib + +.. automodule:: kombu.transport.amqplib + + .. contents:: + :local: + + Transport + --------- + + .. 
autoclass:: Transport + :members: + :undoc-members: + + Connection + ---------- + + .. autoclass:: Connection + :members: + :undoc-members: + :inherited-members: + + Channel + ------- + + .. autoclass:: Channel + :members: + :undoc-members: + + Message + ------- + + .. autoclass:: Message + :members: + :undoc-members: + diff --git a/docs/reference/kombu.transport.base.rst b/docs/reference/kombu.transport.base.rst new file mode 100644 index 0000000..c9d77df --- /dev/null +++ b/docs/reference/kombu.transport.base.rst @@ -0,0 +1,62 @@ +.. currentmodule:: kombu.transport.base + +.. automodule:: kombu.transport.base + + .. contents:: + :local: + + Message + ------- + + .. autoclass:: Message + + .. autoattribute:: payload + .. autoattribute:: channel + .. autoattribute:: delivery_tag + .. autoattribute:: content_type + .. autoattribute:: content_encoding + .. autoattribute:: delivery_info + .. autoattribute:: headers + .. autoattribute:: properties + .. autoattribute:: body + .. autoattribute:: acknowledged + + .. automethod:: ack + .. automethod:: reject + .. automethod:: requeue + .. automethod:: decode + + Transport + --------- + + .. autoclass:: Transport + + .. autoattribute:: client + .. autoattribute:: default_port + + .. attribute:: recoverable_connection_errors + + Optional list of connection related exceptions that can be + recovered from, but where the connection must be closed + and re-established first. + + If not defined then all :attr:`connection_errors` and + :class:`channel_errors` will be regarded as recoverable, + but needing to close the connection first. + + .. attribute:: recoverable_channel_errors + + Optional list of channel related exceptions that can be + automatically recovered from without re-establishing the + connection. + + .. autoattribute:: connection_errors + .. autoattribute:: channel_errors + + .. automethod:: establish_connection + .. automethod:: close_connection + .. automethod:: create_channel + .. automethod:: close_channel + .. automethod:: drain_events + + diff --git a/docs/reference/kombu.transport.beanstalk.rst b/docs/reference/kombu.transport.beanstalk.rst new file mode 100644 index 0000000..4cde78e --- /dev/null +++ b/docs/reference/kombu.transport.beanstalk.rst @@ -0,0 +1,20 @@ +.. currentmodule:: kombu.transport.beanstalk + +.. automodule:: kombu.transport.beanstalk + + .. contents:: + :local: + + Transport + --------- + + .. autoclass:: Transport + :members: + :undoc-members: + + Channel + ------- + + .. autoclass:: Channel + :members: + :undoc-members: diff --git a/docs/reference/kombu.transport.couchdb.rst b/docs/reference/kombu.transport.couchdb.rst new file mode 100644 index 0000000..26ecb4e --- /dev/null +++ b/docs/reference/kombu.transport.couchdb.rst @@ -0,0 +1,25 @@ +.. currentmodule:: kombu.transport.couchdb + +.. automodule:: kombu.transport.couchdb + + .. contents:: + :local: + + Transport + --------- + + .. autoclass:: Transport + :members: + :undoc-members: + + Channel + ------- + + .. autoclass:: Channel + :members: + :undoc-members: + + Functions + --------- + + .. 
autofunction:: create_message_view diff --git a/docs/reference/kombu.transport.django.management.commands.clean_kombu_messages.rst b/docs/reference/kombu.transport.django.management.commands.clean_kombu_messages.rst new file mode 100644 index 0000000..7949eea --- /dev/null +++ b/docs/reference/kombu.transport.django.management.commands.clean_kombu_messages.rst @@ -0,0 +1,14 @@ +========================================================== + Django Management - clean_kombu_messages +========================================================== + +.. contents:: + :local: +.. currentmodule:: + kombu.transport.django.management.commands.clean_kombu_messages + +.. automodule:: + kombu.transport.django.management.commands.clean_kombu_messages + + :members: + :undoc-members: diff --git a/docs/reference/kombu.transport.django.managers.rst b/docs/reference/kombu.transport.django.managers.rst new file mode 100644 index 0000000..9afe7a6 --- /dev/null +++ b/docs/reference/kombu.transport.django.managers.rst @@ -0,0 +1,11 @@ +========================================================== + Django Managers - kombu.transport.django.managers +========================================================== + +.. contents:: + :local: +.. currentmodule:: kombu.transport.django.managers + +.. automodule:: kombu.transport.django.managers + :members: + :undoc-members: diff --git a/docs/reference/kombu.transport.django.models.rst b/docs/reference/kombu.transport.django.models.rst new file mode 100644 index 0000000..4466c0f --- /dev/null +++ b/docs/reference/kombu.transport.django.models.rst @@ -0,0 +1,11 @@ +========================================================== + Django Models - kombu.transport.django.models +========================================================== + +.. contents:: + :local: +.. currentmodule:: kombu.transport.django.models + +.. automodule:: kombu.transport.django.models + :members: + :undoc-members: diff --git a/docs/reference/kombu.transport.django.rst b/docs/reference/kombu.transport.django.rst new file mode 100644 index 0000000..4203c77 --- /dev/null +++ b/docs/reference/kombu.transport.django.rst @@ -0,0 +1,24 @@ +========================================= + kombu.transport.django +========================================= + +.. currentmodule:: kombu.transport.django + +.. automodule:: kombu.transport.django + + .. contents:: + :local: + + Transport + --------- + + .. autoclass:: Transport + :members: + :undoc-members: + + Channel + ------- + + .. autoclass:: Channel + :members: + :undoc-members: diff --git a/docs/reference/kombu.transport.filesystem.rst b/docs/reference/kombu.transport.filesystem.rst new file mode 100644 index 0000000..d059edb --- /dev/null +++ b/docs/reference/kombu.transport.filesystem.rst @@ -0,0 +1,21 @@ +.. currentmodule:: kombu.transport.filesystem + +.. automodule:: kombu.transport.filesystem + + .. contents:: + :local: + + Transport + --------- + + .. autoclass:: Transport + :members: + :undoc-members: + + Channel + ------- + + .. autoclass:: Channel + :members: + :undoc-members: + diff --git a/docs/reference/kombu.transport.librabbitmq.rst b/docs/reference/kombu.transport.librabbitmq.rst new file mode 100644 index 0000000..cc694d7 --- /dev/null +++ b/docs/reference/kombu.transport.librabbitmq.rst @@ -0,0 +1,35 @@ +.. currentmodule:: kombu.transport.librabbitmq + +.. automodule:: kombu.transport.librabbitmq + + .. contents:: + :local: + + Transport + --------- + + .. autoclass:: Transport + :members: + :undoc-members: + + Connection + ---------- + + .. 
autoclass:: Connection + :members: + :undoc-members: + :inherited-members: + + Channel + ------- + + .. autoclass:: Channel + :members: + :undoc-members: + + Message + ------- + + .. autoclass:: Message + :members: + :undoc-members: diff --git a/docs/reference/kombu.transport.memory.rst b/docs/reference/kombu.transport.memory.rst new file mode 100644 index 0000000..c712b13 --- /dev/null +++ b/docs/reference/kombu.transport.memory.rst @@ -0,0 +1,20 @@ +.. currentmodule:: kombu.transport.memory + +.. automodule:: kombu.transport.memory + + .. contents:: + :local: + + Transport + --------- + + .. autoclass:: Transport + :members: + :undoc-members: + + Channel + ------- + + .. autoclass:: Channel + :members: + :undoc-members: diff --git a/docs/reference/kombu.transport.mongodb.rst b/docs/reference/kombu.transport.mongodb.rst new file mode 100644 index 0000000..f4d2553 --- /dev/null +++ b/docs/reference/kombu.transport.mongodb.rst @@ -0,0 +1,20 @@ +.. currentmodule:: kombu.transport.mongodb + +.. automodule:: kombu.transport.mongodb + + .. contents:: + :local: + + Transport + --------- + + .. autoclass:: Transport + :members: + :undoc-members: + + Channel + ------- + + .. autoclass:: Channel + :members: + :undoc-members: diff --git a/docs/reference/kombu.transport.pyamqp.rst b/docs/reference/kombu.transport.pyamqp.rst new file mode 100644 index 0000000..33ebf0b --- /dev/null +++ b/docs/reference/kombu.transport.pyamqp.rst @@ -0,0 +1,36 @@ +.. currentmodule:: kombu.transport.pyamqp + +.. automodule:: kombu.transport.pyamqp + + .. contents:: + :local: + + Transport + --------- + + .. autoclass:: Transport + :members: + :undoc-members: + + Connection + ---------- + + .. autoclass:: Connection + :members: + :undoc-members: + :inherited-members: + + Channel + ------- + + .. autoclass:: Channel + :members: + :undoc-members: + + Message + ------- + + .. autoclass:: Message + :members: + :undoc-members: + diff --git a/docs/reference/kombu.transport.pyro.rst b/docs/reference/kombu.transport.pyro.rst new file mode 100644 index 0000000..5bbf337 --- /dev/null +++ b/docs/reference/kombu.transport.pyro.rst @@ -0,0 +1,20 @@ +.. currentmodule:: kombu.transport.pyro + +.. automodule:: kombu.transport.pyro + + .. contents:: + :local: + + Transport + --------- + + .. autoclass:: Transport + :members: + :undoc-members: + + Channel + ------- + + .. autoclass:: Channel + :members: + :undoc-members: diff --git a/docs/reference/kombu.transport.redis.rst b/docs/reference/kombu.transport.redis.rst new file mode 100644 index 0000000..6a3f4fe --- /dev/null +++ b/docs/reference/kombu.transport.redis.rst @@ -0,0 +1,20 @@ +.. currentmodule:: kombu.transport.redis + +.. automodule:: kombu.transport.redis + + .. contents:: + :local: + + Transport + --------- + + .. autoclass:: Transport + :members: + :undoc-members: + + Channel + ------- + + .. autoclass:: Channel + :members: + :undoc-members: diff --git a/docs/reference/kombu.transport.rst b/docs/reference/kombu.transport.rst new file mode 100644 index 0000000..806d9d9 --- /dev/null +++ b/docs/reference/kombu.transport.rst @@ -0,0 +1,23 @@ +.. currentmodule:: kombu.transport + +.. automodule:: kombu.transport + + .. contents:: + :local: + + Data + ---- + + .. data:: DEFAULT_TRANSPORT + + Default transport used when no transport specified. + + .. data:: TRANSPORT_ALIASES + + Mapping of transport aliases/class names. + + Functions + --------- + + .. autofunction:: get_transport_cls + .. 
autofunction:: resolve_transport diff --git a/docs/reference/kombu.transport.sqlalchemy.models.rst b/docs/reference/kombu.transport.sqlalchemy.models.rst new file mode 100644 index 0000000..5a40044 --- /dev/null +++ b/docs/reference/kombu.transport.sqlalchemy.models.rst @@ -0,0 +1,27 @@ +.. currentmodule:: kombu.transport.sqlalchemy.models + +.. automodule:: kombu.transport.sqlalchemy.models + + .. contents:: + :local: + + Models + ------ + + .. autoclass:: Queue + + .. autoattribute:: Queue.id + + .. autoattribute:: Queue.name + + .. autoclass:: Message + + .. autoattribute:: Message.id + + .. autoattribute:: Message.visible + + .. autoattribute:: Message.sent_at + + .. autoattribute:: Message.payload + + .. autoattribute:: Message.version diff --git a/docs/reference/kombu.transport.sqlalchemy.rst b/docs/reference/kombu.transport.sqlalchemy.rst new file mode 100644 index 0000000..3b4797c --- /dev/null +++ b/docs/reference/kombu.transport.sqlalchemy.rst @@ -0,0 +1,25 @@ +==================================== + kombu.transport.sqlalchemy +==================================== + + +.. currentmodule:: kombu.transport.sqlalchemy + +.. automodule:: kombu.transport.sqlalchemy + + .. contents:: + :local: + + Transport + --------- + + .. autoclass:: Transport + :members: + :undoc-members: + + Channel + ------- + + .. autoclass:: Channel + :members: + :undoc-members: diff --git a/docs/reference/kombu.transport.virtual.exchange.rst b/docs/reference/kombu.transport.virtual.exchange.rst new file mode 100644 index 0000000..220b017 --- /dev/null +++ b/docs/reference/kombu.transport.virtual.exchange.rst @@ -0,0 +1,35 @@ +.. currentmodule:: kombu.transport.virtual.exchange + +.. automodule:: kombu.transport.virtual.exchange + + .. contents:: + :local: + + Direct + ------ + + .. autoclass:: DirectExchange + :members: + :undoc-members: + + Topic + ----- + + .. autoclass:: TopicExchange + :members: + :undoc-members: + + Fanout + ------ + + .. autoclass:: FanoutExchange + :members: + :undoc-members: + + Interface + --------- + + .. autoclass:: ExchangeType + :members: + :undoc-members: + diff --git a/docs/reference/kombu.transport.virtual.rst b/docs/reference/kombu.transport.virtual.rst new file mode 100644 index 0000000..4bac1dd --- /dev/null +++ b/docs/reference/kombu.transport.virtual.rst @@ -0,0 +1,117 @@ +.. currentmodule:: kombu.transport.virtual + +.. automodule:: kombu.transport.virtual + + .. contents:: + :local: + + Transports + ---------- + + .. autoclass:: Transport + + .. autoattribute:: Channel + + .. autoattribute:: Cycle + + .. autoattribute:: polling_interval + + .. autoattribute:: default_port + + .. autoattribute:: state + + .. autoattribute:: cycle + + .. automethod:: establish_connection + + .. automethod:: close_connection + + .. automethod:: create_channel + + .. automethod:: close_channel + + .. automethod:: drain_events + + Channel + ------- + + .. autoclass:: AbstractChannel + :members: + + .. autoclass:: Channel + + .. autoattribute:: Message + + .. autoattribute:: state + + .. autoattribute:: qos + + .. autoattribute:: do_restore + + .. autoattribute:: exchange_types + + .. automethod:: exchange_declare + + .. automethod:: exchange_delete + + .. automethod:: queue_declare + + .. automethod:: queue_delete + + .. automethod:: queue_bind + + .. automethod:: queue_purge + + .. automethod:: basic_publish + + .. automethod:: basic_consume + + .. automethod:: basic_cancel + + .. automethod:: basic_get + + .. automethod:: basic_ack + + .. automethod:: basic_recover + + .. 
automethod:: basic_reject
+
+        .. automethod:: basic_qos
+
+        .. automethod:: get_table
+
+        .. automethod:: typeof
+
+        .. automethod:: drain_events
+
+        .. automethod:: prepare_message
+
+        .. automethod:: message_to_python
+
+        .. automethod:: flow
+
+        .. automethod:: close
+
+    Message
+    -------
+
+    .. autoclass:: Message
+        :members:
+        :undoc-members:
+        :inherited-members:
+
+    Quality Of Service
+    ------------------
+
+    .. autoclass:: QoS
+        :members:
+        :undoc-members:
+        :inherited-members:
+
+    In-memory State
+    ---------------
+
+    .. autoclass:: BrokerState
+        :members:
+        :undoc-members:
+        :inherited-members:
diff --git a/docs/reference/kombu.transport.virtual.scheduling.rst b/docs/reference/kombu.transport.virtual.scheduling.rst
new file mode 100644
index 0000000..5eca4b9
--- /dev/null
+++ b/docs/reference/kombu.transport.virtual.scheduling.rst
@@ -0,0 +1,7 @@
+.. contents::
+    :local:
+.. currentmodule:: kombu.transport.virtual.scheduling
+
+.. automodule:: kombu.transport.virtual.scheduling
+    :members:
+    :undoc-members:
diff --git a/docs/reference/kombu.transport.zmq.rst b/docs/reference/kombu.transport.zmq.rst
new file mode 100644
index 0000000..08d0ea5
--- /dev/null
+++ b/docs/reference/kombu.transport.zmq.rst
@@ -0,0 +1,13 @@
+=====================
+ kombu.transport.zmq
+=====================
+
+.. currentmodule:: kombu.transport.zmq
+
+.. automodule:: kombu.transport.zmq
+    :members:
+    :undoc-members:
+
+    .. contents::
+        :local:
diff --git a/docs/reference/kombu.transport.zookeeper.rst b/docs/reference/kombu.transport.zookeeper.rst
new file mode 100644
index 0000000..af900a3
--- /dev/null
+++ b/docs/reference/kombu.transport.zookeeper.rst
@@ -0,0 +1,25 @@
+===========================
+ kombu.transport.zookeeper
+===========================
+
+.. currentmodule:: kombu.transport.zookeeper
+
+.. automodule:: kombu.transport.zookeeper
+
+    .. contents::
+        :local:
+
+    Transport
+    ---------
+
+    .. autoclass:: Transport
+        :members:
+        :undoc-members:
+
+    Channel
+    -------
+
+    .. autoclass:: Channel
+        :members:
+        :undoc-members:
+
diff --git a/docs/reference/kombu.utils.amq_manager.rst b/docs/reference/kombu.utils.amq_manager.rst
new file mode 100644
index 0000000..13e191e
--- /dev/null
+++ b/docs/reference/kombu.utils.amq_manager.rst
@@ -0,0 +1,11 @@
+====================================================
+ Generic RabbitMQ manager - kombu.utils.amq_manager
+====================================================
+
+.. contents::
+    :local:
+.. currentmodule:: kombu.utils.amq_manager
+
+.. automodule:: kombu.utils.amq_manager
+    :members:
+    :undoc-members:
diff --git a/docs/reference/kombu.utils.compat.rst b/docs/reference/kombu.utils.compat.rst
new file mode 100644
index 0000000..3172ed3
--- /dev/null
+++ b/docs/reference/kombu.utils.compat.rst
@@ -0,0 +1,11 @@
+==========================================================
+ Compat. utilities - kombu.utils.compat
+==========================================================
+
+.. contents::
+    :local:
+.. currentmodule:: kombu.utils.compat
+
+.. automodule:: kombu.utils.compat
+    :members:
+    :undoc-members:
diff --git a/docs/reference/kombu.utils.debug.rst b/docs/reference/kombu.utils.debug.rst
new file mode 100644
index 0000000..35cbc4c
--- /dev/null
+++ b/docs/reference/kombu.utils.debug.rst
@@ -0,0 +1,11 @@
+==========================================================
+ Debugging - kombu.utils.debug
+==========================================================
+
+.. contents::
+    :local:
+.. currentmodule:: kombu.utils.debug
+
+.. 
automodule:: kombu.utils.debug
+    :members:
+    :undoc-members:
diff --git a/docs/reference/kombu.utils.encoding.rst b/docs/reference/kombu.utils.encoding.rst
new file mode 100644
index 0000000..2ac4c51
--- /dev/null
+++ b/docs/reference/kombu.utils.encoding.rst
@@ -0,0 +1,11 @@
+==========================================================
+ String Encoding - kombu.utils.encoding
+==========================================================
+
+.. contents::
+    :local:
+.. currentmodule:: kombu.utils.encoding
+
+.. automodule:: kombu.utils.encoding
+    :members:
+    :undoc-members:
diff --git a/docs/reference/kombu.utils.eventio.rst b/docs/reference/kombu.utils.eventio.rst
new file mode 100644
index 0000000..16c40f3
--- /dev/null
+++ b/docs/reference/kombu.utils.eventio.rst
@@ -0,0 +1,11 @@
+==========================================================
+ Evented I/O - kombu.utils.eventio
+==========================================================
+
+.. contents::
+    :local:
+.. currentmodule:: kombu.utils.eventio
+
+.. automodule:: kombu.utils.eventio
+    :members:
+    :undoc-members:
diff --git a/docs/reference/kombu.utils.functional.rst b/docs/reference/kombu.utils.functional.rst
new file mode 100644
index 0000000..ffe1fbb
--- /dev/null
+++ b/docs/reference/kombu.utils.functional.rst
@@ -0,0 +1,11 @@
+==========================================================
+ kombu.utils.functional
+==========================================================
+
+.. contents::
+    :local:
+.. currentmodule:: kombu.utils.functional
+
+.. automodule:: kombu.utils.functional
+    :members:
+    :undoc-members:
diff --git a/docs/reference/kombu.utils.limits.rst b/docs/reference/kombu.utils.limits.rst
new file mode 100644
index 0000000..59df550
--- /dev/null
+++ b/docs/reference/kombu.utils.limits.rst
@@ -0,0 +1,11 @@
+==========================================================
+ Rate limiting - kombu.utils.limits
+==========================================================
+
+.. contents::
+    :local:
+.. currentmodule:: kombu.utils.limits
+
+.. automodule:: kombu.utils.limits
+    :members:
+    :undoc-members:
diff --git a/docs/reference/kombu.utils.rst b/docs/reference/kombu.utils.rst
new file mode 100644
index 0000000..8df34c5
--- /dev/null
+++ b/docs/reference/kombu.utils.rst
@@ -0,0 +1,11 @@
+==========================================================
+ Utilities - kombu.utils
+==========================================================
+
+.. contents::
+    :local:
+.. currentmodule:: kombu.utils
+
+.. automodule:: kombu.utils
+    :members:
+    :undoc-members:
diff --git a/docs/reference/kombu.utils.text.rst b/docs/reference/kombu.utils.text.rst
new file mode 100644
index 0000000..ca6354f
--- /dev/null
+++ b/docs/reference/kombu.utils.text.rst
@@ -0,0 +1,11 @@
+==========================================================
+ Text utilities - kombu.utils.text
+==========================================================
+
+.. contents::
+    :local:
+.. currentmodule:: kombu.utils.text
+
+.. automodule:: kombu.utils.text
+    :members:
+    :undoc-members:
diff --git a/docs/reference/kombu.utils.url.rst b/docs/reference/kombu.utils.url.rst
new file mode 100644
index 0000000..f223fba
--- /dev/null
+++ b/docs/reference/kombu.utils.url.rst
@@ -0,0 +1,11 @@
+==============================================
+ kombu.utils.url
+==============================================
+
+.. contents::
+    :local:
+.. currentmodule:: kombu.utils.url
+
+.. 
automodule:: kombu.utils.url
+    :members:
+    :undoc-members:
diff --git a/docs/userguide/connections.rst b/docs/userguide/connections.rst
new file mode 100644
index 0000000..f97b4b7
--- /dev/null
+++ b/docs/userguide/connections.rst
@@ -0,0 +1,178 @@
+.. _guide-connections:
+
+============================
+ Connections and transports
+============================
+
+.. _connection-basics:
+
+Basics
+======
+
+To send and receive messages you need a transport and a connection.
+There are several transports to choose from (amqp, librabbitmq, redis,
+in-memory, and so on), and you can even create your own.
+The default transport is amqp.
+
+Create a connection using the default transport::
+
+    >>> from kombu import Connection
+    >>> connection = Connection('amqp://guest:guest@localhost:5672//')
+
+The connection will not be established at this point, as connections
+are established lazily, when needed. If you want to establish the
+connection explicitly you have to call the
+:meth:`~kombu.Connection.connect` method::
+
+    >>> connection.connect()
+
+You can also check whether the connection is connected::
+
+    >>> connection.connected
+    True
+
+Connections must always be closed after use::
+
+    >>> connection.close()
+
+But best practice is to release the connection instead: this returns
+the resource if the connection is associated with a connection pool,
+or closes the connection if not, and it also makes the transition to
+connection pools easier later on::
+
+    >>> connection.release()
+
+.. seealso::
+
+    :ref:`guide-pools`
+
+Of course, the connection can also be used as a context manager, and
+you are encouraged to do so as it makes it harder to forget releasing
+open resources::
+
+    with Connection() as connection:
+        # work with connection
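+
+Connection methods will raise an error if the broker is unreachable.
+As a minimal sketch (assuming a broker running on localhost),
+:meth:`~kombu.Connection.ensure` wraps a function so the call is
+retried when recoverable connection or channel errors occur:
+
+.. code-block:: python
+
+    from kombu import Connection, Producer
+
+    with Connection('amqp://guest:guest@localhost:5672//') as connection:
+        producer = Producer(connection.default_channel)
+
+        # ensure() returns a wrapper around producer.publish that
+        # revives the producer and retries on recoverable errors.
+        safe_publish = connection.ensure(producer, producer.publish,
+                                         max_retries=3)
+        safe_publish({'hello': 'world'}, routing_key='hello')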
+
+.. _connection-urls:
+
+URLs
+====
+
+Connection parameters can be provided as a URL in the format::
+
+    transport://userid:password@hostname:port/virtual_host
+
+All of these are valid URLs::
+
+    # Specifies using the amqp transport only, default values
+    # are taken from the keyword arguments.
+    amqp://
+
+    # Using Redis
+    redis://localhost:6379/
+
+    # Using Redis over a Unix socket
+    redis+socket:///tmp/redis.sock
+
+    # Using virtual host '/foo'
+    amqp://localhost//foo
+
+    # Using virtual host 'foo'
+    amqp://localhost/foo
+
+The query part of the URL can also be used to set options, e.g.::
+
+    amqp://localhost/myvhost?ssl=1
+
+See :ref:`connection-options` for a list of supported options.
+
+A connection without options will use the default connection settings:
+host ``localhost``, the default port, user name `guest`,
+password `guest` and virtual host "/". A connection without arguments
+is the same as::
+
+    >>> Connection('amqp://guest:guest@localhost:5672//')
+
+The default port is transport specific; for AMQP it is 5672.
+
+Other fields may also have different meanings depending on the
+transport used. For example, the Redis transport uses the
+`virtual_host` argument as the Redis database number.
+
+.. _connection-options:
+
+Keyword arguments
+=================
+
+The :class:`~kombu.Connection` class supports additional
+keyword arguments; these are:
+
+:hostname: Default host name if not provided in the URL.
+:userid: Default user name if not provided in the URL.
+:password: Default password if not provided in the URL.
+:virtual_host: Default virtual host if not provided in the URL.
+:port: Default port if not provided in the URL.
+:transport: Default transport if not provided in the URL.
+    Can be a string specifying the path to the class
+    (e.g. ``kombu.transport.pyamqp:Transport``), or one of the
+    aliases: ``pyamqp``, ``librabbitmq``, ``redis``, ``memory``,
+    and so on.
+:ssl: Use SSL to connect to the server. Default is ``False``.
+    Only supported by the amqp transport.
+:insist: Insist on connecting to a server.
+    *No longer supported; a relic from AMQP 0.8.*
+:connect_timeout: Timeout in seconds for connecting to the
+    server. May not be supported by the specified transport.
+:transport_options: A dict of additional connection arguments to
+    pass to alternate kombu channel implementations. Consult the
+    transport documentation for available options.
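+
+For illustration, here is a sketch of the URL-less form using the
+keyword arguments above (the values shown are the defaults described
+in :ref:`connection-urls`):
+
+.. code-block:: python
+
+    from kombu import Connection
+
+    # Equivalent to Connection('amqp://guest:guest@localhost:5672//'),
+    # spelled out with individual keyword arguments.
+    connection = Connection(
+        hostname='localhost',
+        userid='guest',
+        password='guest',
+        virtual_host='/',
+        port=5672,
+        transport='pyamqp',
+        connect_timeout=5,      # seconds; an assumption for the example
+        transport_options={},   # transport specific options, if any
+    )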
+
+AMQP Transports
+===============
+
+There are three transports available for AMQP use:
+
+1. ``pyamqp`` uses the pure Python library ``amqp``, automatically
+   installed with Kombu.
+2. ``librabbitmq`` uses the high performance transport written in C.
+   This requires the ``librabbitmq`` Python package to be installed,
+   which automatically compiles the C library.
+3. ``amqp`` tries to use ``librabbitmq`` but falls back to ``pyamqp``.
+
+For the highest performance, you should install the ``librabbitmq``
+package. To ensure librabbitmq is used, you can explicitly specify it
+in the transport URL, or use ``amqp`` to get the automatic fallback,
+as shown in the sketch below.
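+
+A hedged sketch of selecting each transport by URL scheme
+(``librabbitmq://`` only works when the C client is installed):
+
+.. code-block:: python
+
+    from kombu import Connection
+
+    Connection('pyamqp://guest:guest@localhost//')       # pure Python
+    Connection('librabbitmq://guest:guest@localhost//')  # C client
+    Connection('amqp://guest:guest@localhost//')         # C client, falling
+                                                         # back to pure Python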
+
+Transport Comparison
+====================
+
++---------------+----------+------------+------------+---------------+
+| **Client**    | **Type** | **Direct** | **Topic**  | **Fanout**    |
++---------------+----------+------------+------------+---------------+
+| *amqp*        | Native   | Yes        | Yes        | Yes           |
++---------------+----------+------------+------------+---------------+
+| *redis*       | Virtual  | Yes        | Yes        | Yes (PUB/SUB) |
++---------------+----------+------------+------------+---------------+
+| *mongodb*     | Virtual  | Yes        | Yes        | Yes           |
++---------------+----------+------------+------------+---------------+
+| *beanstalk*   | Virtual  | Yes        | Yes [#f1]_ | No            |
++---------------+----------+------------+------------+---------------+
+| *SQS*         | Virtual  | Yes        | Yes [#f1]_ | Yes [#f2]_    |
++---------------+----------+------------+------------+---------------+
+| *couchdb*     | Virtual  | Yes        | Yes [#f1]_ | No            |
++---------------+----------+------------+------------+---------------+
+| *zookeeper*   | Virtual  | Yes        | Yes [#f1]_ | No            |
++---------------+----------+------------+------------+---------------+
+| *in-memory*   | Virtual  | Yes        | Yes [#f1]_ | No            |
++---------------+----------+------------+------------+---------------+
+| *django*      | Virtual  | Yes        | Yes [#f1]_ | No            |
++---------------+----------+------------+------------+---------------+
+| *sqlalchemy*  | Virtual  | Yes        | Yes [#f1]_ | No            |
++---------------+----------+------------+------------+---------------+
+
+.. [#f1] Declarations are only kept in memory, so exchanges/queues
+   must be declared by all clients that need them.
+
+.. [#f2] Fanout is supported by storing routing tables in SimpleDB.
+   Disabled by default, but can be enabled by using the
+   ``supports_fanout`` transport option.
diff --git a/docs/userguide/consumers.rst b/docs/userguide/consumers.rst
new file mode 100644
index 0000000..cccbd4a
--- /dev/null
+++ b/docs/userguide/consumers.rst
@@ -0,0 +1,103 @@
+.. _guide-consumers:
+
+===========
+ Consumers
+===========
+
+.. _consumer-basics:
+
+Basics
+======
+
+The :class:`Consumer` takes a connection (or channel) and a list of
+queues to consume from. Several consumers can be mixed to consume from
+different channels, as they all bind to the same connection, and
+``drain_events`` will drain events from all channels on that
+connection.
+
+.. note::
+
+    Kombu since 3.0 will only accept json/binary or text messages by
+    default; to allow deserialization of other formats you have to
+    specify them in the ``accept`` argument::
+
+        Consumer(conn, accept=['json', 'pickle', 'msgpack', 'yaml'])
+
+Draining events from a single consumer:
+
+.. code-block:: python
+
+    with Consumer(connection, queues, accept=['json']):
+        connection.drain_events(timeout=1)
+
+Draining events from several consumers:
+
+.. code-block:: python
+
+    from kombu.utils import nested
+
+    with connection.channel() as channel1, \
+            connection.channel() as channel2:
+        with nested(Consumer(channel1, queues1, accept=['json']),
+                    Consumer(channel2, queues2, accept=['json'])):
+            connection.drain_events(timeout=1)
+
+Or using :class:`~kombu.mixins.ConsumerMixin`:
+
+.. code-block:: python
+
+    from kombu.mixins import ConsumerMixin
+
+    class C(ConsumerMixin):
+
+        def __init__(self, connection):
+            self.connection = connection
+
+        def get_consumers(self, Consumer, channel):
+            return [
+                Consumer(queues, callbacks=[self.on_message],
+                         accept=['json']),
+            ]
+
+        def on_message(self, body, message):
+            print("RECEIVED MESSAGE: %r" % (body, ))
+            message.ack()
+
+    C(connection).run()
+
+and with multiple channels again:
+
+.. code-block:: python
+
+    from kombu import Consumer
+    from kombu.mixins import ConsumerMixin
+
+    class C(ConsumerMixin):
+        channel2 = None
+
+        def __init__(self, connection):
+            self.connection = connection
+
+        def get_consumers(self, _, default_channel):
+            self.channel2 = default_channel.connection.channel()
+            return [Consumer(default_channel, queues1,
+                             callbacks=[self.on_message],
+                             accept=['json']),
+                    Consumer(self.channel2, queues2,
+                             callbacks=[self.on_special_message],
+                             accept=['json'])]
+
+        def on_consumer_end(self, connection, default_channel):
+            if self.channel2:
+                self.channel2.close()
+
+    C(connection).run()
+
+Reference
+=========
+
+.. autoclass:: kombu.Consumer
+    :noindex:
+    :members:
diff --git a/docs/userguide/examples.rst b/docs/userguide/examples.rst
new file mode 100644
index 0000000..0a8a4d8
--- /dev/null
+++ b/docs/userguide/examples.rst
@@ -0,0 +1,57 @@
+.. _examples:
+
+========================
+ Examples
+========================
+
+.. _hello-world-example:
+
+Hello World Example
+===================
+
+The example below uses the :ref:`guide-simple` interface to send a
+"hello world" message through the message broker (RabbitMQ) and print
+the received message.
+
+:file:`hello_publisher.py`:
+
+.. literalinclude:: ../../examples/hello_publisher.py
+    :language: python
+
+:file:`hello_consumer.py`:
+
+.. literalinclude:: ../../examples/hello_consumer.py
+    :language: python
+
+.. _task-queue-example:
+
+Task Queue Example
+==================
+
+A very simple task queue using pickle, with primitive support
+for priorities using different queues.
+
+:file:`queues.py`:
+
+.. literalinclude:: ../../examples/simple_task_queue/queues.py
+    :language: python
+
+:file:`worker.py`:
+
+.. literalinclude:: ../../examples/simple_task_queue/worker.py
+    :language: python
+
+:file:`tasks.py`:
+
+.. literalinclude:: ../../examples/simple_task_queue/tasks.py
+    :language: python
+
+:file:`client.py`:
+
+.. literalinclude:: ../../examples/simple_task_queue/client.py
+    :language: python
diff --git a/docs/userguide/index.rst b/docs/userguide/index.rst
new file mode 100644
index 0000000..f195b48
--- /dev/null
+++ b/docs/userguide/index.rst
@@ -0,0 +1,18 @@
+============
+ User Guide
+============
+
+:Release: |version|
+:Date: |today|
+
+.. toctree::
+    :maxdepth: 2
+
+    introduction
+    connections
+    producers
+    consumers
+    examples
+    simple
+    pools
+    serialization
diff --git a/docs/userguide/introduction.rst b/docs/userguide/introduction.rst
new file mode 100644
index 0000000..b540e1e
--- /dev/null
+++ b/docs/userguide/introduction.rst
@@ -0,0 +1,100 @@
+.. _guide-intro:
+
+==============
+ Introduction
+==============
+
+.. _intro-messaging:
+
+What is messaging?
+==================
+
+In times long ago people didn't have email.
+They had the postal service, which with great courage would deliver
+mail from hand to hand all over the globe. Soldiers deployed in wars
+far away could only communicate with their families through the
+postal service, and posting a letter would mean that the recipient
+wouldn't actually receive it until weeks or months, sometimes years,
+later.
+
+It's hard to imagine this today, when people are expected to be
+available for phone calls every minute of the day.
+
+So humans need to communicate with each other; that shouldn't be news
+to anyone. But why would applications?
+
+One example is banks.
+When you transfer money from one bank to another, your bank sends
+a message to a central clearinghouse. The clearinghouse
+then records and coordinates the transaction. Banks
+need to send and receive millions and millions of
+messages every day, and losing a single message would mean either
+losing your money (bad) or the bank's money (very bad).
+
+Another example is stock exchanges, which also need very high
+message throughput and have strict reliability requirements.
+
+Email is a great way for people to communicate. It is much faster
+than using the postal service, but using email as a means for
+programs to communicate would be like the soldier above, waiting
+for signs of life from home.
+
+.. _messaging-scenarios:
+
+Messaging Scenarios
+===================
+
+* Request/Reply
+
+  The request/reply pattern works like the postal service example.
+  A message is addressed to a single recipient, with a return address
+  printed on the back. The recipient may or may not reply to the
+  message by sending it back to the original sender.
+
+  Request-Reply is achieved using *direct* exchanges.
+
+* Broadcast
+
+  In a broadcast scenario a message is sent to all parties.
+  This could be none, one or many recipients.
+
+  Broadcast is achieved using *fanout* exchanges.
+
+* Publish/Subscribe
+
+  In a publish/subscribe scenario producers publish messages
+  to topics, and consumers subscribe to the topics they are
+  interested in.
+
+  If no consumers subscribe to the topic, then the message
+  will not be delivered to anyone. If several consumers
+  subscribe to the topic, then the message will be delivered
+  to all of them.
+
+  Pub-sub is achieved using *topic* exchanges.
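+
+These scenarios map directly onto kombu exchange declarations; here is
+a minimal sketch (the exchange and queue names are illustrative):
+
+.. code-block:: python
+
+    from kombu import Exchange, Queue
+
+    # Request/Reply: a direct exchange routes on an exact routing key.
+    rpc_exchange = Exchange('rpc', type='direct')
+    rpc_queue = Queue('rpc.server', rpc_exchange, routing_key='rpc.server')
+
+    # Broadcast: a fanout exchange delivers to every bound queue,
+    # ignoring the routing key.
+    event_exchange = Exchange('events', type='fanout')
+
+    # Publish/Subscribe: a topic exchange routes on wildcard patterns.
+    news_exchange = Exchange('news', type='topic')
+    sports_queue = Queue('sports', news_exchange,
+                         routing_key='news.sports.#')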
+
+.. _messaging-reliability:
+
+Reliability
+===========
+
+For some applications reliability is very important: losing a message
+is a critical situation that must never happen. For other applications
+losing a message is fine, as they can recover in other ways, or the
+message is resent anyway as a periodic update.
+
+AMQP defines two built-in delivery modes:
+
+* persistent
+
+  Messages are written to disk and survive a broker restart.
+
+* transient
+
+  Messages may or may not be written to disk, as the broker sees fit
+  to optimize memory contents. The messages will not survive a broker
+  restart.
+
+Transient messaging is by far the fastest way to send and receive
+messages, so persistence comes at a price; for some applications,
+though, it is a necessary cost. The sketch below shows both modes.
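+
+As a hedged sketch of the two modes in kombu (``delivery_mode`` can be
+set per message when publishing; the routing key is illustrative):
+
+.. code-block:: python
+
+    from kombu import Connection, Producer
+
+    with Connection('amqp://guest:guest@localhost:5672//') as conn:
+        producer = Producer(conn.default_channel)
+
+        # delivery_mode=2 (persistent): written to disk, survives
+        # a broker restart.
+        producer.publish({'event': 'important'},
+                         routing_key='events', delivery_mode=2)
+
+        # delivery_mode=1 (transient): faster, but lost if the
+        # broker restarts.
+        producer.publish({'event': 'heartbeat'},
+                         routing_key='events', delivery_mode=1)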
diff --git a/docs/userguide/pools.rst b/docs/userguide/pools.rst
new file mode 100644
index 0000000..e8d6000
--- /dev/null
+++ b/docs/userguide/pools.rst
@@ -0,0 +1,175 @@
+.. _guide-pools:
+
+===============================
+ Connection and Producer Pools
+===============================
+
+.. _default-pools:
+
+Default Pools
+=============
+
+Kombu ships with two global pools: one connection pool,
+and one producer pool.
+
+These are convenient, and the fact that they are global is usually not
+an issue, as connections should often be limited at the process level
+rather than per thread or per application. If you do need custom pools
+per thread, see :ref:`custom-pool-groups`.
+
+.. _default-connections:
+
+The connection pool group
+-------------------------
+
+The connection pools are available as :attr:`kombu.pools.connections`.
+This is a pool group, which means you give it a connection instance,
+and you get a pool instance back. We have one pool per connection
+instance to support multiple connections in the same app.
+All connection instances with the same connection parameters will
+get the same pool::
+
+    >>> from kombu import Connection
+    >>> from kombu.pools import connections
+
+    >>> connections[Connection('redis://localhost:6379')]
+
+    >>> connections[Connection('redis://localhost:6379')]
+
+Let's acquire and release a connection:
+
+.. code-block:: python
+
+    from kombu import Connection
+    from kombu.pools import connections
+
+    connection = Connection('redis://localhost:6379')
+
+    with connections[connection].acquire(block=True) as conn:
+        print('Got connection: %r' % (conn.as_uri(), ))
+
+.. note::
+
+    The ``block=True`` here means that the acquire call will block
+    until a connection is available in the pool.
+    Note that this will block forever in case there is a deadlock
+    in your code where a connection is not released. There
+    is a ``timeout`` argument you can use to safeguard against this
+    (see :meth:`kombu.connection.Resource.acquire`).
+
+    If blocking is disabled and there aren't any connections
+    left in the pool, a
+    :class:`kombu.exceptions.ConnectionLimitExceeded` exception
+    will be raised.
+
+That's about it. If you need to connect to multiple brokers
+at once you can do that too:
+
+.. code-block:: python
+
+    from kombu import Connection
+    from kombu.pools import connections
+
+    c1 = Connection('amqp://')
+    c2 = Connection('redis://')
+
+    with connections[c1].acquire(block=True) as conn1:
+        with connections[c2].acquire(block=True) as conn2:
+            # ....
+
+.. _default-producers:
+
+The producer pool group
+-----------------------
+
+This is a pool group just like the connections, except
+that it manages :class:`~kombu.Producer` instances
+used to publish messages.
+
+Here is an example using the producer pool to publish a message
+to the ``news`` exchange:
+
+.. code-block:: python
+
+    from kombu import Connection, Exchange
+    from kombu.common import maybe_declare
+    from kombu.pools import producers
+
+    # The exchange we send our news articles to.
+    news_exchange = Exchange('news')
+
+    # The article we want to send.
+    article = {'title': 'No cellular coverage on the tube for 2012',
+               'ingress': 'yadda yadda yadda'}
+
+    # The broker where our exchange is.
+    connection = Connection('amqp://guest:guest@localhost:5672//')
+
+    with producers[connection].acquire(block=True) as producer:
+        # maybe_declare knows what entities have already been declared
+        # so we don't have to do so multiple times in the same process.
+        maybe_declare(news_exchange, producer.channel)
+        producer.publish(article, routing_key='domestic',
+                         serializer='json',
+                         compression='zlib')
+
+.. _default-pool-limits:
+
+Setting pool limits
+-------------------
+
+By default every connection instance has a limit of 200 connections.
+You can change this limit using :func:`kombu.pools.set_limit`.
+You are able to grow the pool at runtime, but you can't shrink it,
+so it is best to set the limit as early as possible after your
+application starts::
+
+    >>> from kombu import pools
+    >>> pools.set_limit(10)
+
+Resetting all pools
+-------------------
+
+You can close all active connections and reset all pool groups by
+using the :func:`kombu.pools.reset` function. Note that this
+will not respect anything currently using these connections,
+but will simply drag the connections away from under their feet,
+so you should be very careful before you use it.
+
+Kombu will reset the pools if the process is forked,
+so that forked processes start with clean pool groups.
+
+.. _custom-pool-groups:
+
+Custom Pool Groups
+==================
+
+To maintain your own pool groups you should create your own
+:class:`~kombu.pools.Connections` and :class:`~kombu.pools.Producers`
+instances:
+
+.. code-block:: python
+
+    from kombu import pools
+    from kombu import Connection
+
+    connections = pools.Connections(limit=100)
+    producers = pools.Producers(limit=connections.limit)
+
+    connection = Connection('amqp://guest:guest@localhost:5672//')
+
+    with connections[connection].acquire(block=True):
+        # ...
+
+If you want to use the global limit that can be set with
+:func:`~kombu.pools.set_limit` you can use a special value as the
+``limit`` argument:
+
+.. code-block:: python
+
+    from kombu import pools
+
+    connections = pools.Connections(limit=pools.use_default_limit)
diff --git a/docs/userguide/producers.rst b/docs/userguide/producers.rst
new file mode 100644
index 0000000..454b4ca
--- /dev/null
+++ b/docs/userguide/producers.rst
@@ -0,0 +1,24 @@
+.. _guide-producers:
+
+===========
+ Producers
+===========
+
+.. _producer-basics:
+
+Basics
+======
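+
+A producer is created with a channel (or connection), and an optional
+default exchange and routing key. A minimal, hedged sketch (assuming a
+local broker; the ``tasks`` names are illustrative):
+
+.. code-block:: python
+
+    from kombu import Connection, Exchange, Producer
+
+    task_exchange = Exchange('tasks', type='direct')
+
+    with Connection('amqp://guest:guest@localhost:5672//') as conn:
+        producer = Producer(conn.default_channel,
+                            exchange=task_exchange,
+                            routing_key='tasks')
+        # auto_declare (enabled by default) declares the exchange
+        # the first time the producer is used.
+        producer.publish({'task': 'add', 'args': (2, 2)},
+                         serializer='json')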
+
+Serialization
+=============
+
+See :ref:`guide-serialization`.
+
+Reference
+=========
+
+.. autoclass:: kombu.Producer
+    :noindex:
+    :members:
diff --git a/docs/userguide/serialization.rst b/docs/userguide/serialization.rst
new file mode 100644
index 0000000..37169a8
--- /dev/null
+++ b/docs/userguide/serialization.rst
@@ -0,0 +1,184 @@
+.. _guide-serialization:
+
+===============
+ Serialization
+===============
+
+.. _serializers:
+
+Serializers
+===========
+
+By default every message is encoded using `JSON`_, so sending
+Python data structures like dictionaries and lists works.
+`YAML`_, `msgpack`_ and Python's built-in `pickle` module are also
+supported, and if needed you can register any custom serialization
+scheme you want to use.
+
+By default Kombu will only load JSON messages, so if you want
+to use other serialization formats you must explicitly enable
+them in your consumer by using the ``accept`` argument:
+
+.. code-block:: python
+
+    Consumer(conn, [queue], accept=['json', 'pickle', 'msgpack'])
+
+The ``accept`` argument can also include MIME-types.
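+
+For instance, serializer names and MIME-types can be mixed freely
+(a sketch; the content types shown are the ones kombu registers for
+its built-in serializers):
+
+.. code-block:: python
+
+    Consumer(conn, [queue],
+             accept=['json',               # alias for application/json
+                     'application/json',
+                     'application/x-yaml'])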
+
+.. _`JSON`: http://www.json.org/
+.. _`YAML`: http://yaml.org/
+.. _`msgpack`: http://msgpack.sourceforge.net/
+
+Each option has its advantages and disadvantages.
+
+`json` -- JSON is supported in many programming languages, is now
+    a standard part of Python (since 2.6), and is fairly fast to
+    decode using modern Python libraries such as `cjson` or
+    `simplejson`.
+
+    The primary disadvantage to `JSON` is that it limits you to
+    the following data types: strings, Unicode, floats, booleans,
+    dictionaries, and lists. Decimals and dates are notably missing.
+
+    Also, binary data will be transferred using Base64 encoding,
+    which will cause the transferred data to be around 34% larger
+    than an encoding which supports native binary types.
+
+    However, if your data fits inside the above constraints and
+    you need cross-language support, the default setting of `JSON`
+    is probably your best choice.
+
+`pickle` -- If you have no desire to support any language other than
+    Python, then using the `pickle` encoding will gain you
+    the support of all built-in Python data types (except class
+    instances), smaller messages when sending binary files, and a
+    slight speedup over `JSON` processing.
+
+    .. admonition:: Pickle and Security
+
+        The pickle format is very convenient as it can serialize
+        and deserialize almost any object, but this is also a concern
+        for security.
+
+        Carefully crafted pickle payloads can do almost anything
+        a regular Python program can do, so if you let your consumer
+        automatically decode pickled objects you must make sure
+        to limit access to the broker so that untrusted
+        parties do not have the ability to send messages!
+
+    By default Kombu uses pickle protocol 2, but this can be changed
+    using the :envvar:`PICKLE_PROTOCOL` environment variable or by
+    changing the global :data:`kombu.serialization.pickle_protocol`
+    flag.
+
+`yaml` -- YAML has many of the same characteristics as `json`,
+    except that it natively supports more data types (including
+    dates, recursive references, etc.).
+
+    However, the Python libraries for YAML are a good bit slower
+    than the libraries for JSON.
+
+    If you need a more expressive set of data types and need to
+    maintain cross-language compatibility, then `YAML` may be a
+    better fit than the above.
+
+To instruct `Kombu` to use an alternate serialization method,
+use one of the following options.
+
+    1. Set the serialization option on a per-producer basis::
+
+        >>> producer = Producer(channel,
+        ...                     exchange=exchange,
+        ...                     serializer="yaml")
+
+    2. Set the serialization option per message::
+
+        >>> producer.publish(message, routing_key=rkey,
+        ...                  serializer="pickle")
+
+Note that a `Consumer` does not need the serialization method
+specified; it can auto-detect the serialization method, as the
+content type is sent as a message header.
+
+.. _sending-raw-data:
+
+Sending raw data without Serialization
+======================================
+
+In some cases, you don't need your message data to be serialized.
+If you pass in a plain string or Unicode object as your message,
+then `Kombu` will not waste cycles serializing/deserializing the data.
+
+You can optionally specify a `content_type` and `content_encoding`
+for the raw data::
+
+    >>> import os
+    >>> with open(os.path.expanduser("~/my_picture.jpg"), "rb") as fh:
+    ...     producer.publish(fh.read(),
+    ...                      content_type="image/jpeg",
+    ...                      content_encoding="binary",
+    ...                      routing_key=rkey)
+
+The `Message` object returned by the `Consumer` class will have a
+`content_type` and `content_encoding` attribute.
+
+.. _serialization-entrypoints:
+
+Creating extensions using Setuptools entry-points
+=================================================
+
+A package can also register new serializers using Setuptools
+entry-points.
+
+The entry-point must provide the name of the serializer along
+with the path to a tuple providing the rest of the args:
+``decoder_function, encoder_function, content_type, content_encoding``.
+
+An example entrypoint could be:
+
+.. code-block:: python
+
+    from setuptools import setup
+
+    setup(
+        entry_points={
+            'kombu.serializers': [
+                'my_serializer = my_module.serializer:register_args'
+            ]
+        }
+    )
+
+Then the module ``my_module.serializer`` would look like:
+
+.. code-block:: python
+
+    register_args = (my_decoder, my_encoder, 'application/x-mimetype',
+                     'utf-8')
+
+When this package is installed the new 'my_serializer' serializer will
+be supported by Kombu.
+
+.. admonition:: Buffer Objects
+
+    The decoder function of a custom serializer must support both
+    strings and Python's old-style buffer objects.
+
+    The Python pickle and json modules usually don't do this via
+    their ``loads`` functions, but you can easily add support by
+    making a wrapper around the ``load`` function, which takes file
+    objects instead of strings.
+
+    Here's an example providing such a wrapper for :func:`pickle.load`:
+
+    .. code-block:: python
+
+        import pickle
+        from kombu.serialization import BytesIO, register
+
+
+        def loads(s):
+            return pickle.load(BytesIO(s))
+
+        register('my_pickle', pickle.dumps, loads,
+                 content_type='application/x-pickle2',
+                 content_encoding='binary')
diff --git a/docs/userguide/simple.rst b/docs/userguide/simple.rst
new file mode 100644
index 0000000..4cf98ec
--- /dev/null
+++ b/docs/userguide/simple.rst
@@ -0,0 +1,116 @@
+.. _guide-simple:
+
+==================
+ Simple Interface
+==================
+
+.. contents::
+    :local:
+
+:mod:`kombu.simple` is a simple interface to AMQP queueing.
+It is only slightly different from the :class:`~Queue.Queue` class in
+the Python Standard Library, which makes it excellent for users with
+basic messaging needs.
+
+Instead of defining exchanges and queues, the simple classes only
+require two arguments: a channel (or connection) and a name. The name
+is used as the queue, exchange and routing key. If the need arises,
+you can specify a :class:`~kombu.Queue` as the name argument instead,
+as the sketch below shows.
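+
+For example, a hedged sketch passing a fully specified
+:class:`~kombu.Queue` (with its own exchange and routing key) instead
+of a plain name:
+
+.. code-block:: python
+
+    from kombu import Connection, Exchange, Queue
+
+    exchange = Exchange('logs', type='direct')
+    queue = Queue('log_queue', exchange, routing_key='log_queue')
+
+    with Connection('amqp://guest:guest@localhost:5672//') as conn:
+        simple_queue = conn.SimpleQueue(queue)
+        simple_queue.put({'hello': 'world'})
+        simple_queue.close()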
+        for _ in eventloop(connection):
+            pass
diff --git a/examples/complete_send.py b/examples/complete_send.py
new file mode 100644
index 0000000..337083c
--- /dev/null
+++ b/examples/complete_send.py
@@ -0,0 +1,30 @@
+"""
+
+Example producer that sends a single message and exits.
+
+You can use `complete_receive.py` to receive the message sent.
+
+"""
+from kombu import Connection, Producer, Exchange, Queue
+
+#: By default messages sent to exchanges are persistent (delivery_mode=2),
+#: and queues and exchanges are durable.
+exchange = Exchange('kombu_demo', type='direct')
+queue = Queue('kombu_demo', exchange, routing_key='kombu_demo')
+
+
+with Connection('amqp://guest:guest@localhost:5672//') as connection:
+
+    #: Producers are used to publish messages.
+    #: A default exchange and routing key can also be specified
+    #: as arguments to the Producer, but we'd rather specify this
+    #: explicitly at the publish call.
+    producer = Producer(connection)
+
+    #: Publish the message using the json serializer (which is the default),
+    #: and zlib compression.  The kombu consumer will automatically detect
+    #: encoding, serialization and compression used and decode accordingly.
+    producer.publish({'hello': 'world'},
+                     exchange=exchange,
+                     routing_key='kombu_demo',
+                     serializer='json', compression='zlib')
diff --git a/examples/experimental/async_consume.py b/examples/experimental/async_consume.py
new file mode 100644
index 0000000..1127128
--- /dev/null
+++ b/examples/experimental/async_consume.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+
+from kombu import Connection, Exchange, Queue, Producer, Consumer
+from kombu.async import Hub
+
+hub = Hub()
+exchange = Exchange('asynt')
+queue = Queue('asynt', exchange, 'asynt')
+
+
+def send_message(conn):
+    producer = Producer(conn)
+    producer.publish('hello world', exchange=exchange, routing_key='asynt')
+    print('MESSAGE SENT')
+
+
+def on_message(message):
+    print('RECEIVED: %r' % (message.body, ))
+    message.ack()
+    hub.stop()  # <-- exit after one message
+
+
+if __name__ == '__main__':
+    conn = Connection('amqp://')
+    conn.register_with_event_loop(hub)
+
+    with Consumer(conn, [queue], on_message=on_message):
+        send_message(conn)
+        hub.run_forever()
diff --git a/examples/hello_consumer.py b/examples/hello_consumer.py
new file mode 100644
index 0000000..695c655
--- /dev/null
+++ b/examples/hello_consumer.py
@@ -0,0 +1,8 @@
+from kombu import Connection
+
+with Connection('amqp://guest:guest@localhost:5672//') as conn:
+    simple_queue = conn.SimpleQueue('simple_queue')
+    message = simple_queue.get(block=True, timeout=1)
+    print("Received: %s" % message.payload)
+    message.ack()
+    simple_queue.close()
diff --git a/examples/hello_publisher.py b/examples/hello_publisher.py
new file mode 100644
index 0000000..80ec282
--- /dev/null
+++ b/examples/hello_publisher.py
@@ -0,0 +1,9 @@
+from kombu import Connection
+import datetime
+
+with Connection('amqp://guest:guest@localhost:5672//') as conn:
+    simple_queue = conn.SimpleQueue('simple_queue')
+    message = 'hello world, sent at %s' % datetime.datetime.today()
+    simple_queue.put(message)
+    print('Sent: %s' % message)
+    simple_queue.close()
diff --git a/examples/simple_eventlet_receive.py b/examples/simple_eventlet_receive.py
new file mode 100644
index 0000000..f353d1c
--- /dev/null
+++ b/examples/simple_eventlet_receive.py
@@ -0,0 +1,39 @@
+"""
+
+Example that receives a single message and exits using the simple interface.
+
+You can use `simple_eventlet_send.py` (or `simple_send.py`) to send the
+message received.
+
+"""
+import eventlet
+
+from kombu import Connection
+
+eventlet.monkey_patch()
+
+
+def wait_many(timeout=1):
+
+    #: Create connection
+    #: If hostname, userid, password and virtual_host are not specified,
+    #: the values below are the default, but listed here so they can
+    #: easily be changed.
+    with Connection('amqp://guest:guest@localhost:5672//') as connection:
+
+        #: SimpleQueue mimics the interface of the Python Queue module.
+        #: First argument can either be a queue name or a kombu.Queue object.
+        #: If a name, then the queue will be declared with the name as the
+        #: queue name, exchange name and routing key.
+        with connection.SimpleQueue('kombu_demo') as queue:
+
+            while True:
+                try:
+                    message = queue.get(block=False, timeout=timeout)
+                except queue.Empty:
+                    break
+                else:
+                    message.ack()
+                    print(message.payload)
+
+eventlet.spawn(wait_many).wait()
diff --git a/examples/simple_eventlet_send.py b/examples/simple_eventlet_send.py
new file mode 100644
index 0000000..c2b3690
--- /dev/null
+++ b/examples/simple_eventlet_send.py
@@ -0,0 +1,40 @@
+"""
+
+Example that sends a batch of messages and exits using the simple interface.
+
+You can use `simple_receive.py` (or `complete_receive.py`) to receive the
+messages sent.
+
+"""
+import eventlet
+
+from kombu import Connection
+
+eventlet.monkey_patch()
+
+
+def send_many(n):
+
+    #: Create connection
+    #: If hostname, userid, password and virtual_host are not specified,
+    #: the values below are the default, but listed here so they can
+    #: easily be changed.
+    with Connection('amqp://guest:guest@localhost:5672//') as connection:
+
+        #: SimpleQueue mimics the interface of the Python Queue module.
+        #: First argument can either be a queue name or a kombu.Queue object.
+        #: If a name, then the queue will be declared with the name as the
+        #: queue name, exchange name and routing key.
+        with connection.SimpleQueue('kombu_demo') as queue:
+
+            def send_message(i):
+                queue.put({'hello': 'world%s' % (i, )})
+
+            pool = eventlet.GreenPool(10)
+            for i in range(n):
+                pool.spawn(send_message, i)
+            pool.waitall()
+
+
+if __name__ == '__main__':
+    send_many(10)
diff --git a/examples/simple_receive.py b/examples/simple_receive.py
new file mode 100644
index 0000000..906c274
--- /dev/null
+++ b/examples/simple_receive.py
@@ -0,0 +1,26 @@
+"""
+Example receiving a message using the SimpleQueue interface.
+"""
+
+from kombu import Connection
+
+#: Create connection
+#: If hostname, userid, password and virtual_host are not specified,
+#: the values below are the default, but listed here so they can
+#: easily be changed.
+with Connection('amqp://guest:guest@localhost:5672//') as conn:
+
+    #: SimpleQueue mimics the interface of the Python Queue module.
+    #: First argument can either be a queue name or a kombu.Queue object.
+    #: If a name, then the queue will be declared with the name as the queue
+    #: name, exchange name and routing key.
+    with conn.SimpleQueue('kombu_demo') as queue:
+        message = queue.get(block=True, timeout=10)
+        message.ack()
+        print(message.payload)
+
+####
+#: If you don't use the with statement then you must always
+#: remember to close objects after use:
+#:   queue.close()
+#:   connection.close()
diff --git a/examples/simple_send.py b/examples/simple_send.py
new file mode 100644
index 0000000..3b3f236
--- /dev/null
+++ b/examples/simple_send.py
@@ -0,0 +1,29 @@
+"""
+
+Example that sends a single message and exits using the simple interface.
+
+You can use `simple_receive.py` (or `complete_receive.py`) to receive the
+message sent.
+
+"""
+from kombu import Connection
+
+#: Create connection
+#: If hostname, userid, password and virtual_host are not specified,
+#: the values below are the default, but listed here so they can
+#: easily be changed.
+with Connection('amqp://guest:guest@localhost:5672//') as conn:
+
+    #: SimpleQueue mimics the interface of the Python Queue module.
+    #: First argument can either be a queue name or a kombu.Queue object.
+    #: If a name, then the queue will be declared with the name as the queue
+    #: name, exchange name and routing key.
+    with conn.SimpleQueue('kombu_demo') as queue:
+        queue.put({'hello': 'world'}, serializer='json', compression='zlib')
+
+
+#####
+# If you don't use the with statement, you must always
+# remember to close objects.
+#   queue.close()
+#   connection.close()
diff --git a/examples/simple_task_queue/__init__.py b/examples/simple_task_queue/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/examples/simple_task_queue/client.py b/examples/simple_task_queue/client.py
new file mode 100644
index 0000000..0c28932
--- /dev/null
+++ b/examples/simple_task_queue/client.py
@@ -0,0 +1,28 @@
+from kombu.pools import producers
+
+from .queues import task_exchange
+
+priority_to_routing_key = {'high': 'hipri',
+                           'mid': 'midpri',
+                           'low': 'lopri'}
+
+
+def send_as_task(connection, fun, args=(), kwargs=None, priority='mid'):
+    payload = {'fun': fun, 'args': args, 'kwargs': kwargs or {}}
+    routing_key = priority_to_routing_key[priority]
+
+    with producers[connection].acquire(block=True) as producer:
+        producer.publish(payload,
+                         serializer='pickle',
+                         compression='bzip2',
+                         exchange=task_exchange,
+                         declare=[task_exchange],
+                         routing_key=routing_key)
+
+if __name__ == '__main__':
+    from kombu import Connection
+    from .tasks import hello_task
+
+    connection = Connection('amqp://guest:guest@localhost:5672//')
+    send_as_task(connection, fun=hello_task, args=('Kombu', ), kwargs={},
+                 priority='high')
diff --git a/examples/simple_task_queue/queues.py b/examples/simple_task_queue/queues.py
new file mode 100644
index 0000000..602c2b0
--- /dev/null
+++ b/examples/simple_task_queue/queues.py
@@ -0,0 +1,6 @@
+from kombu import Exchange, Queue
+
+task_exchange = Exchange('tasks', type='direct')
+task_queues = [Queue('hipri', task_exchange, routing_key='hipri'),
+               Queue('midpri', task_exchange, routing_key='midpri'),
+               Queue('lopri', task_exchange, routing_key='lopri')]
diff --git a/examples/simple_task_queue/tasks.py b/examples/simple_task_queue/tasks.py
new file mode 100644
index 0000000..f6e9da0
--- /dev/null
+++ b/examples/simple_task_queue/tasks.py
@@ -0,0 +1,2 @@
+def hello_task(who="world"):
+    print("Hello %s" % (who, ))
diff --git a/examples/simple_task_queue/worker.py b/examples/simple_task_queue/worker.py
new file mode 100644
index 0000000..ded3aa7
--- /dev/null
+++ b/examples/simple_task_queue/worker.py
@@ -0,0 +1,42 @@
+from kombu.mixins import ConsumerMixin
+from kombu.log import get_logger
+from kombu.utils import kwdict, reprcall
+
+from .queues import task_queues
+
+logger = get_logger(__name__)
+
+
+class Worker(ConsumerMixin):
+
+    def __init__(self, connection):
+        self.connection = connection
+
+    def get_consumers(self, Consumer, channel):
+        return [Consumer(queues=task_queues,
+                         accept=['pickle', 'json'],
+                         callbacks=[self.process_task])]
+
+    def process_task(self, body, message):
+        fun = body['fun']
+        args = body['args']
+        kwargs = body['kwargs']
+        logger.info('Got task: %s',
reprcall(fun.__name__, args, kwargs)) + try: + fun(*args, **kwdict(kwargs)) + except Exception as exc: + logger.error('task raised exception: %r', exc) + message.ack() + +if __name__ == '__main__': + from kombu import Connection + from kombu.utils.debug import setup_logging + # setup root logger + setup_logging(loglevel='INFO', loggers=['']) + + with Connection('amqp://guest:guest@localhost:5672//') as conn: + try: + worker = Worker(conn) + worker.run() + except KeyboardInterrupt: + print('bye bye') diff --git a/extra/doc2ghpages b/extra/doc2ghpages new file mode 100755 index 0000000..5ebc7aa --- /dev/null +++ b/extra/doc2ghpages @@ -0,0 +1,13 @@ +#!/bin/bash + +git checkout master +(cd docs; + rm -rf .build; + make html; + (cd .build/html; + sphinx-to-github;)) +git checkout gh-pages +cp -r docs/.build/html/* . +git commit . -m "Autogenerated documentation for github." +git push origin gh-pages +git checkout master diff --git a/extra/release/bump_version.py b/extra/release/bump_version.py new file mode 100755 index 0000000..be122ec --- /dev/null +++ b/extra/release/bump_version.py @@ -0,0 +1,166 @@ +#!/usr/bin/env python +from __future__ import absolute_import + +import errno +import os +import re +import sys +import subprocess + +from contextlib import contextmanager +from tempfile import NamedTemporaryFile + +rq = lambda s: s.strip("\"'") +str_t = str if sys.version_info[0] >= 3 else basestring + + +def cmd(*args): + return subprocess.Popen(args, stdout=subprocess.PIPE).communicate()[0] + + +@contextmanager +def no_enoent(): + try: + yield + except OSError as exc: + if exc.errno != errno.ENOENT: + raise + + +class StringVersion(object): + + def decode(self, s): + s = rq(s) + text = "" + major, minor, release = s.split(".") + if not release.isdigit(): + pos = release.index(re.split("\d+", release)[1][0]) + release, text = release[:pos], release[pos:] + return int(major), int(minor), int(release), text + + def encode(self, v): + return ".".join(map(str, v[:3])) + v[3] +to_str = StringVersion().encode +from_str = StringVersion().decode + + +class TupleVersion(object): + + def decode(self, s): + v = list(map(rq, s.split(", "))) + return (tuple(map(int, v[0:3])) + + tuple(["".join(v[3:])])) + + def encode(self, v): + v = list(v) + + def quote(lit): + if isinstance(lit, str_t): + return '"%s"' % (lit, ) + return str(lit) + + if not v[-1]: + v.pop() + return ", ".join(map(quote, v)) + + +class VersionFile(object): + + def __init__(self, filename): + self.filename = filename + self._kept = None + + def _as_orig(self, version): + return self.wb % {"version": self.type.encode(version), + "kept": self._kept} + + def write(self, version): + pattern = self.regex + with no_enoent(): + with NamedTemporaryFile() as dest: + with open(self.filename) as orig: + for line in orig: + if pattern.match(line): + dest.write(self._as_orig(version)) + else: + dest.write(line) + os.rename(dest.name, self.filename) + + def parse(self): + pattern = self.regex + gpos = 0 + with open(self.filename) as fh: + for line in fh: + m = pattern.match(line) + if m: + if "?P" in pattern.pattern: + self._kept, gpos = m.groupdict()["keep"], 1 + return self.type.decode(m.groups()[gpos]) + + +class PyVersion(VersionFile): + regex = re.compile(r'^VERSION\s*=\s*\((.+?)\)') + wb = "VERSION = (%(version)s)\n" + type = TupleVersion() + + +class SphinxVersion(VersionFile): + regex = re.compile(r'^:[Vv]ersion:\s*(.+?)$') + wb = ':Version: %(version)s\n' + type = StringVersion() + + +class CPPVersion(VersionFile): + regex = 
re.compile(r'^\#\s*define\s*(?P<keep>\w*)VERSION\s+(.+)')
+    wb = '#define %(kept)sVERSION "%(version)s"\n'
+    type = StringVersion()
+
+
+_filetype_to_type = {"py": PyVersion,
+                     "rst": SphinxVersion,
+                     "c": CPPVersion,
+                     "h": CPPVersion}
+
+
+def filetype_to_type(filename):
+    _, _, suffix = filename.rpartition(".")
+    return _filetype_to_type[suffix](filename)
+
+
+def bump(*files, **kwargs):
+    version = kwargs.get("version")
+    files = [filetype_to_type(f) for f in files]
+    versions = [v.parse() for v in files]
+    current = list(reversed(sorted(versions)))[0]  # find highest
+
+    if version:
+        next = from_str(version)
+    else:
+        major, minor, release, text = current
+        if text:
+            raise Exception("Can't bump alpha releases")
+        next = (major, minor, release + 1, text)
+
+    print("Bump version from %s -> %s" % (to_str(current), to_str(next)))
+
+    for v in files:
+        print("  writing %r..." % (v.filename, ))
+        v.write(next)
+
+    print(cmd("git", "commit", "-m", "Bumps version to %s" % (to_str(next), ),
+              *[f.filename for f in files]))
+    print(cmd("git", "tag", "v%s" % (to_str(next), )))
+
+
+def main(argv=sys.argv, version=None):
+    if not len(argv) > 1:
+        print("Usage: distdir [docfile] -- <version>")
+        sys.exit(0)
+    if "--" in argv:
+        c = argv.index('--')
+        version = argv[c + 1]
+        argv = argv[:c]
+    bump(*argv[1:], version=version)
+
+if __name__ == "__main__":
+    main()
diff --git a/extra/release/doc4allmods b/extra/release/doc4allmods
new file mode 100755
index 0000000..f95fee2
--- /dev/null
+++ b/extra/release/doc4allmods
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+PACKAGE="$1"
+SKIP_PACKAGES="$PACKAGE tests management urls"
+SKIP_FILES="kombu.entity.rst
+            kombu.messaging.rst
+            kombu.transport.django.migrations.rst
+            kombu.transport.django.migrations.0001_initial.rst
+            kombu.transport.django.management.rst
+            kombu.transport.django.management.commands.rst"
+
+modules=$(find "$PACKAGE" -name "*.py")
+
+failed=0
+for module in $modules; do
+    dotted=$(echo $module | sed 's/\//\./g')
+    name=${dotted%.__init__.py}
+    name=${name%.py}
+    rst=$name.rst
+    skip=0
+    for skip_package in $SKIP_PACKAGES; do
+        [ $(echo "$name" | cut -d. -f 2) == "$skip_package" ] && skip=1
+    done
+    for skip_file in $SKIP_FILES; do
+        [ "$skip_file" == "$rst" ] && skip=1
+    done
+
+    if [ $skip -eq 0 ]; then
+        if [ ! -f "docs/reference/$rst" ]; then
+            if [ !
-f "docs/internals/reference/$rst" ]; then + echo $rst :: FAIL + failed=1 + fi + fi + fi +done + +exit $failed diff --git a/extra/release/flakeplus.py b/extra/release/flakeplus.py new file mode 100755 index 0000000..ebeb8e0 --- /dev/null +++ b/extra/release/flakeplus.py @@ -0,0 +1,125 @@ +#!/usr/bin/env python +from __future__ import absolute_import + +import os +import re +import sys + +from collections import defaultdict +from unipath import Path + +RE_COMMENT = r'^\s*\#' +RE_NOQA = r'.+?\#\s+noqa+' +RE_MULTILINE_COMMENT_O = r'^\s*(?:\'\'\'|""").+?(?:\'\'\'|""")' +RE_MULTILINE_COMMENT_S = r'^\s*(?:\'\'\'|""")' +RE_MULTILINE_COMMENT_E = r'(?:^|.+?)(?:\'\'\'|""")' +RE_WITH = r'(?:^|\s+)with\s+' +RE_WITH_IMPORT = r'''from\s+ __future__\s+ import\s+ with_statement''' +RE_PRINT = r'''(?:^|\s+)print\((?:"|')(?:\W+?)?[A-Z0-9:]{2,}''' +RE_ABS_IMPORT = r'''from\s+ __future__\s+ import\s+ absolute_import''' + +acc = defaultdict(lambda: {"abs": False, "print": False}) + + +def compile(regex): + return re.compile(regex, re.VERBOSE) + + +class FlakePP(object): + re_comment = compile(RE_COMMENT) + re_ml_comment_o = compile(RE_MULTILINE_COMMENT_O) + re_ml_comment_s = compile(RE_MULTILINE_COMMENT_S) + re_ml_comment_e = compile(RE_MULTILINE_COMMENT_E) + re_abs_import = compile(RE_ABS_IMPORT) + re_print = compile(RE_PRINT) + re_with_import = compile(RE_WITH_IMPORT) + re_with = compile(RE_WITH) + re_noqa = compile(RE_NOQA) + map = {"abs": True, "print": False, + "with": False, "with-used": False} + + def __init__(self, verbose=False): + self.verbose = verbose + self.steps = (("abs", self.re_abs_import), + ("with", self.re_with_import), + ("with-used", self.re_with), + ("print", self.re_print)) + + def analyze_fh(self, fh): + steps = self.steps + filename = fh.name + acc = dict(self.map) + index = 0 + errors = [0] + + def error(fmt, **kwargs): + errors[0] += 1 + self.announce(fmt, **dict(kwargs, filename=filename)) + + for index, line in enumerate(self.strip_comments(fh)): + for key, pattern in steps: + if pattern.match(line): + acc[key] = True + if index: + if not acc["abs"]: + error("%(filename)s: missing abs import") + if acc["with-used"] and not acc["with"]: + error("%(filename)s: missing with import") + if acc["print"]: + error("%(filename)s: left over print statement") + + return filename, errors[0], acc + + def analyze_file(self, filename): + with open(filename) as fh: + return self.analyze_fh(fh) + + def analyze_tree(self, dir): + for dirpath, _, filenames in os.walk(dir): + for path in (Path(dirpath, f) for f in filenames): + if path.endswith(".py"): + yield self.analyze_file(path) + + def analyze(self, *paths): + for path in map(Path, paths): + if path.isdir(): + for res in self.analyze_tree(path): + yield res + else: + yield self.analyze_file(path) + + def strip_comments(self, fh): + re_comment = self.re_comment + re_ml_comment_o = self.re_ml_comment_o + re_ml_comment_s = self.re_ml_comment_s + re_ml_comment_e = self.re_ml_comment_e + re_noqa = self.re_noqa + in_ml = False + + for line in fh.readlines(): + if in_ml: + if re_ml_comment_e.match(line): + in_ml = False + else: + if re_noqa.match(line) or re_ml_comment_o.match(line): + pass + elif re_ml_comment_s.match(line): + in_ml = True + elif re_comment.match(line): + pass + else: + yield line + + def announce(self, fmt, **kwargs): + sys.stderr.write((fmt + "\n") % kwargs) + + +def main(argv=sys.argv, exitcode=0): + for _, errors, _ in FlakePP(verbose=True).analyze(*argv[1:]): + if errors: + exitcode = 1 + return exitcode + + +if __name__ == 
"__main__": + sys.exit(main()) diff --git a/extra/release/removepyc.sh b/extra/release/removepyc.sh new file mode 100755 index 0000000..9aaf365 --- /dev/null +++ b/extra/release/removepyc.sh @@ -0,0 +1,3 @@ +#!/bin/bash +(cd "${1:-.}"; + find . -name "*.pyc" | xargs rm -- 2>/dev/null) || echo "ok" diff --git a/extra/release/verify-reference-index.sh b/extra/release/verify-reference-index.sh new file mode 100755 index 0000000..feaa0da --- /dev/null +++ b/extra/release/verify-reference-index.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +verify_index() { + modules=$(grep "kombu." "$1" | \ + perl -ple's/^\s*|\s*$//g;s{\.}{/}g;') + retval=0 + for module in $modules; do + if [ ! -f "$module.py" ]; then + if [ ! -f "$module/__init__.py" ]; then + echo "Outdated reference: $module" + retval=1 + fi + fi + done + + return $retval +} + +verify_index docs/reference/index.rst diff --git a/funtests/__init__.py b/funtests/__init__.py new file mode 100644 index 0000000..1bea488 --- /dev/null +++ b/funtests/__init__.py @@ -0,0 +1,5 @@ +import os +import sys + +sys.path.insert(0, os.pardir) +sys.path.insert(0, os.getcwd()) diff --git a/funtests/setup.cfg b/funtests/setup.cfg new file mode 100644 index 0000000..321d1e4 --- /dev/null +++ b/funtests/setup.cfg @@ -0,0 +1,4 @@ +[nosetests] +verbosity = 1 +detailed-errors = 1 +where = tests diff --git a/funtests/setup.py b/funtests/setup.py new file mode 100644 index 0000000..72f6553 --- /dev/null +++ b/funtests/setup.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +try: + from setuptools import setup + from setuptools.command.install import install +except ImportError: + from ez_setup import use_setuptools + use_setuptools() + from setuptools import setup # noqa + from setuptools.command.install import install # noqa + + +class no_install(install): + + def run(self, *args, **kwargs): + import sys + sys.stderr.write(""" +---------------------------------------------------- +The Kombu functional test suite cannot be installed. 
+---------------------------------------------------- + + +But you can execute the tests by running the command: + + $ python setup.py test + + +""") + + +setup( + name='kombu-funtests', + version='DEV', + description='Functional test suite for Kombu', + author='Ask Solem', + author_email='ask@celeryproject.org', + url='http://github.com/celery/kombu', + platforms=['any'], + packages=[], + data_files=[], + zip_safe=False, + cmdclass={'install': no_install}, + test_suite='nose.collector', + build_requires=[ + 'nose', + 'unittest2', + 'coverage>=3.0', + 'simplejson', + 'PyYAML', + 'msgpack-python', + 'pymongo', + 'couchdb', + 'kazoo', + 'beanstalkc', + 'kombu-sqlalchemy', + 'django', + 'django-kombu', + ], + classifiers=[ + 'Operating System :: OS Independent', + 'Programming Language :: Python', + 'License :: OSI Approved :: BSD License', + 'Intended Audience :: Developers', + ], + long_description='Do not install this package', +) diff --git a/funtests/tests/__init__.py b/funtests/tests/__init__.py new file mode 100644 index 0000000..41cbef6 --- /dev/null +++ b/funtests/tests/__init__.py @@ -0,0 +1,7 @@ +import os +import sys + +sys.path.insert(0, os.path.join(os.getcwd(), os.pardir)) +print(sys.path[0]) +sys.path.insert(0, os.getcwd()) +print(sys.path[0]) diff --git a/funtests/tests/test_SLMQ.py b/funtests/tests/test_SLMQ.py new file mode 100644 index 0000000..d8fd47a --- /dev/null +++ b/funtests/tests/test_SLMQ.py @@ -0,0 +1,29 @@ + +from funtests import transport +from nose import SkipTest +import os + + +class test_SLMQ(transport.TransportCase): + transport = "SLMQ" + prefix = "slmq" + event_loop_max = 100 + message_size_limit = 4192 + reliable_purge = False + suppress_disorder_warning = True # does not guarantee FIFO order, + # even in simple cases. + + def before_connect(self): + if "SLMQ_ACCOUNT" not in os.environ: + raise SkipTest("Missing envvar SLMQ_ACCOUNT") + if "SL_USERNAME" not in os.environ: + raise SkipTest("Missing envvar SL_USERNAME") + if "SL_API_KEY" not in os.environ: + raise SkipTest("Missing envvar SL_API_KEY") + if "SLMQ_HOST" not in os.environ: + raise SkipTest("Missing envvar SLMQ_HOST") + if "SLMQ_SECURE" not in os.environ: + raise SkipTest("Missing envvar SLMQ_SECURE") + + def after_connect(self, connection): + pass diff --git a/funtests/tests/test_SQS.py b/funtests/tests/test_SQS.py new file mode 100644 index 0000000..de08efb --- /dev/null +++ b/funtests/tests/test_SQS.py @@ -0,0 +1,28 @@ +import os + +from nose import SkipTest + +from funtests import transport + + +class test_SQS(transport.TransportCase): + transport = 'SQS' + prefix = 'sqs' + event_loop_max = 100 + message_size_limit = 4192 # SQS max body size / 2. + reliable_purge = False + suppress_disorder_warning = True # does not guarantee FIFO order, + # even in simple cases. 
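+
+    # before_connect() runs in setUp() before any connection attempt is
+    # made; raising SkipTest there (as below, for missing credentials)
+    # marks every test in this case as skipped instead of failed.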
+ + def before_connect(self): + try: + import boto # noqa + except ImportError: + raise SkipTest('boto not installed') + if 'AWS_ACCESS_KEY_ID' not in os.environ: + raise SkipTest('Missing envvar AWS_ACCESS_KEY_ID') + if 'AWS_SECRET_ACCESS_KEY' not in os.environ: + raise SkipTest('Missing envvar AWS_SECRET_ACCESS_KEY') + + def after_connect(self, connection): + connection.channel().sqs diff --git a/funtests/tests/test_amqp.py b/funtests/tests/test_amqp.py new file mode 100644 index 0000000..5ca0c2e --- /dev/null +++ b/funtests/tests/test_amqp.py @@ -0,0 +1,6 @@ +from funtests import transport + + +class test_pyamqp(transport.TransportCase): + transport = 'pyamqp' + prefix = 'pyamqp' diff --git a/funtests/tests/test_amqplib.py b/funtests/tests/test_amqplib.py new file mode 100644 index 0000000..549147a --- /dev/null +++ b/funtests/tests/test_amqplib.py @@ -0,0 +1,14 @@ +from nose import SkipTest + +from funtests import transport + + +class test_amqplib(transport.TransportCase): + transport = 'amqplib' + prefix = 'amqplib' + + def before_connect(self): + try: + import amqplib # noqa + except ImportError: + raise SkipTest('amqplib not installed') diff --git a/funtests/tests/test_beanstalk.py b/funtests/tests/test_beanstalk.py new file mode 100644 index 0000000..d15f6a5 --- /dev/null +++ b/funtests/tests/test_beanstalk.py @@ -0,0 +1,19 @@ +from funtests import transport + +from nose import SkipTest + + +class test_beanstalk(transport.TransportCase): + transport = 'beanstalk' + prefix = 'beanstalk' + event_loop_max = 10 + message_size_limit = 47662 + + def before_connect(self): + try: + import beanstalkc # noqa + except ImportError: + raise SkipTest('beanstalkc not installed') + + def after_connect(self, connection): + connection.channel().client diff --git a/funtests/tests/test_couchdb.py b/funtests/tests/test_couchdb.py new file mode 100644 index 0000000..697d0e2 --- /dev/null +++ b/funtests/tests/test_couchdb.py @@ -0,0 +1,18 @@ +from nose import SkipTest + +from funtests import transport + + +class test_couchdb(transport.TransportCase): + transport = 'couchdb' + prefix = 'couchdb' + event_loop_max = 100 + + def before_connect(self): + try: + import couchdb # noqa + except ImportError: + raise SkipTest('couchdb not installed') + + def after_connect(self, connection): + connection.channel().client diff --git a/funtests/tests/test_django.py b/funtests/tests/test_django.py new file mode 100644 index 0000000..7858d1f --- /dev/null +++ b/funtests/tests/test_django.py @@ -0,0 +1,37 @@ +from nose import SkipTest + +from kombu.tests.case import redirect_stdouts + +from funtests import transport + + +class test_django(transport.TransportCase): + transport = 'django' + prefix = 'django' + event_loop_max = 10 + + def before_connect(self): + + @redirect_stdouts + def setup_django(stdout, stderr): + try: + import django # noqa + except ImportError: + raise SkipTest('django not installed') + from django.conf import settings + if not settings.configured: + settings.configure( + DATABASE_ENGINE='sqlite3', + DATABASE_NAME=':memory:', + DATABASES={ + 'default': { + 'ENGINE': 'django.db.backends.sqlite3', + 'NAME': ':memory:', + }, + }, + INSTALLED_APPS=('kombu.transport.django', ), + ) + from django.core.management import call_command + call_command('syncdb') + + setup_django() diff --git a/funtests/tests/test_librabbitmq.py b/funtests/tests/test_librabbitmq.py new file mode 100644 index 0000000..57b406d --- /dev/null +++ b/funtests/tests/test_librabbitmq.py @@ -0,0 +1,14 @@ +from nose import SkipTest + +from 
funtests import transport + + +class test_librabbitmq(transport.TransportCase): + transport = 'librabbitmq' + prefix = 'librabbitmq' + + def before_connect(self): + try: + import librabbitmq # noqa + except ImportError: + raise SkipTest('librabbitmq not installed') diff --git a/funtests/tests/test_mongodb.py b/funtests/tests/test_mongodb.py new file mode 100644 index 0000000..fca8192 --- /dev/null +++ b/funtests/tests/test_mongodb.py @@ -0,0 +1,80 @@ +from nose import SkipTest + +from kombu import Consumer, Producer, Exchange, Queue +from kombu.five import range +from kombu.utils import nested + +from funtests import transport + + +class test_mongodb(transport.TransportCase): + transport = 'mongodb' + prefix = 'mongodb' + event_loop_max = 100 + + def before_connect(self): + try: + import pymongo # noqa + except ImportError: + raise SkipTest('pymongo not installed') + + def after_connect(self, connection): + connection.channel().client # evaluate connection. + + self.c = self.connection # shortcut + + def test_fanout(self, name='test_mongodb_fanout'): + if not self.verify_alive(): + return + c = self.connection + self.e = Exchange(name, type='fanout') + self.q = Queue(name, exchange=self.e, routing_key=name) + self.q2 = Queue(name + '2', exchange=self.e, routing_key=name + '2') + + channel = c.default_channel + producer = Producer(channel, self.e) + consumer1 = Consumer(channel, self.q) + consumer2 = Consumer(channel, self.q2) + self.q2(channel).declare() + + for i in range(10): + producer.publish({'foo': i}, routing_key=name) + for i in range(10): + producer.publish({'foo': i}, routing_key=name + '2') + + _received1 = [] + _received2 = [] + + def callback1(message_data, message): + _received1.append(message) + message.ack() + + def callback2(message_data, message): + _received2.append(message) + message.ack() + + consumer1.register_callback(callback1) + consumer2.register_callback(callback2) + + with nested(consumer1, consumer2): + + while 1: + if len(_received1) + len(_received2) == 20: + break + c.drain_events(timeout=60) + self.assertEqual(len(_received1) + len(_received2), 20) + + # queue.delete + for i in range(10): + producer.publish({'foo': i}, routing_key=name) + self.assertTrue(self.q(channel).get()) + self.q(channel).delete() + self.q(channel).declare() + self.assertIsNone(self.q(channel).get()) + + # queue.purge + for i in range(10): + producer.publish({'foo': i}, routing_key=name + '2') + self.assertTrue(self.q2(channel).get()) + self.q2(channel).purge() + self.assertIsNone(self.q2(channel).get()) diff --git a/funtests/tests/test_pyamqp.py b/funtests/tests/test_pyamqp.py new file mode 100644 index 0000000..5ca0c2e --- /dev/null +++ b/funtests/tests/test_pyamqp.py @@ -0,0 +1,6 @@ +from funtests import transport + + +class test_pyamqp(transport.TransportCase): + transport = 'pyamqp' + prefix = 'pyamqp' diff --git a/funtests/tests/test_redis.py b/funtests/tests/test_redis.py new file mode 100644 index 0000000..1e40a3a --- /dev/null +++ b/funtests/tests/test_redis.py @@ -0,0 +1,22 @@ +from nose import SkipTest + +from funtests import transport + + +class test_redis(transport.TransportCase): + transport = 'redis' + prefix = 'redis' + + def before_connect(self): + try: + import redis # noqa + except ImportError: + raise SkipTest('redis not installed') + + def after_connect(self, connection): + client = connection.channel().client + client.info() + + def test_cant_connect_raises_connection_error(self): + conn = self.get_connection(port=65534) + 
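# conn.connection_errors is a tuple of the transport's possible
+        # connection exceptions, so it can be passed directly to assertRaises.
+        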
self.assertRaises(conn.connection_errors, conn.connect)
diff --git a/funtests/tests/test_sqla.py b/funtests/tests/test_sqla.py
new file mode 100644
index 0000000..7aa5c4a
--- /dev/null
+++ b/funtests/tests/test_sqla.py
@@ -0,0 +1,16 @@
+from nose import SkipTest
+
+from funtests import transport
+
+
+class test_sqla(transport.TransportCase):
+    transport = 'sqlalchemy'
+    prefix = 'sqlalchemy'
+    event_loop_max = 10
+    connection_options = {'hostname': 'sqla+sqlite://'}
+
+    def before_connect(self):
+        try:
+            import sqlalchemy  # noqa
+        except ImportError:
+            raise SkipTest('sqlalchemy not installed')
diff --git a/funtests/tests/test_zookeeper.py b/funtests/tests/test_zookeeper.py
new file mode 100644
index 0000000..7c3ae0e
--- /dev/null
+++ b/funtests/tests/test_zookeeper.py
@@ -0,0 +1,18 @@
+from nose import SkipTest
+
+from funtests import transport
+
+
+class test_zookeeper(transport.TransportCase):
+    transport = 'zookeeper'
+    prefix = 'zookeeper'
+    event_loop_max = 100
+
+    def before_connect(self):
+        try:
+            import kazoo  # noqa
+        except ImportError:
+            raise SkipTest('kazoo not installed')
+
+    def after_connect(self, connection):
+        connection.channel().client
diff --git a/funtests/transport.py b/funtests/transport.py
new file mode 100644
index 0000000..1887233
--- /dev/null
+++ b/funtests/transport.py
@@ -0,0 +1,313 @@
+from __future__ import absolute_import, print_function
+
+import random
+import socket
+import string
+import sys
+import time
+import unittest2 as unittest
+import warnings
+import weakref
+
+from nose import SkipTest
+
+from kombu import Connection
+from kombu import Exchange, Queue
+from kombu.five import range
+from kombu.tests.case import skip_if_quick
+
+if sys.version_info >= (2, 5):
+    from hashlib import sha256 as _digest
+else:
+    from sha import new as _digest  # noqa
+
+
+def say(msg):
+    print(msg, file=sys.stderr)
+
+
+def _nobuf(x):
+    return [str(i) if isinstance(i, buffer) else i for i in x]
+
+
+def consumeN(conn, consumer, n=1, timeout=30):
+    messages = []
+
+    def callback(message_data, message):
+        messages.append(message_data)
+        message.ack()
+
+    prev, consumer.callbacks = consumer.callbacks, [callback]
+    consumer.consume()
+
+    seconds = 0
+    while True:
+        try:
+            conn.drain_events(timeout=1)
+        except socket.timeout:
+            seconds += 1
+            msg = 'Received %s/%s messages. %s seconds passed.' % (
+                len(messages), n, seconds)
+            if seconds >= timeout:
+                raise socket.timeout(msg)
+            if seconds > 1:
+                say(msg)
+        if len(messages) >= n:
+            break
+
+    consumer.cancel()
+    consumer.callbacks = prev
+    return messages
+
+
+class TransportCase(unittest.TestCase):
+    transport = None
+    prefix = None
+    sep = '.'
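+    # Attributes overridden by transport subclasses: userid/password are
+    # passed to get_connection(), event_loop_max bounds the polling loop in
+    # test_basic_get(), and reliable_purge marks transports whose purge()
+    # reports an accurate message count.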
+    userid = None
+    password = None
+
+    event_loop_max = 100
+    connection_options = {}
+
+    suppress_disorder_warning = False
+    reliable_purge = True
+
+    connected = False
+    skip_test_reason = None
+
+    message_size_limit = None
+
+    def before_connect(self):
+        pass
+
+    def after_connect(self, connection):
+        pass
+
+    def setUp(self):
+        if self.transport:
+            try:
+                self.before_connect()
+            except SkipTest as exc:
+                self.skip_test_reason = str(exc)
+            else:
+                self.do_connect()
+            self.exchange = Exchange(self.prefix, 'direct')
+            self.queue = Queue(self.prefix, self.exchange, self.prefix)
+
+    def purge(self, names):
+        chan = self.connection.channel()
+        total = 0
+        for queue in names:
+            while 1:
+                # ensure the queue is completely empty
+                purged = chan.queue_purge(queue=queue)
+                if not purged:
+                    break
+                total += purged
+        chan.close()
+        return total
+
+    def get_connection(self, **options):
+        if self.userid:
+            options.setdefault('userid', self.userid)
+        if self.password:
+            options.setdefault('password', self.password)
+        return Connection(transport=self.transport, **options)
+
+    def do_connect(self):
+        self.connection = self.get_connection(**self.connection_options)
+        try:
+            self.connection.connect()
+            self.after_connect(self.connection)
+        except self.connection.connection_errors:
+            self.skip_test_reason = '%s transport cannot connect' % (
+                self.transport, )
+        else:
+            self.connected = True
+
+    def verify_alive(self):
+        if self.transport:
+            if not self.connected:
+                raise SkipTest(self.skip_test_reason)
+            return True
+
+    def purge_consumer(self, consumer):
+        return self.purge([queue.name for queue in consumer.queues])
+
+    def test_produce__consume(self):
+        if not self.verify_alive():
+            return
+        chan1 = self.connection.channel()
+        consumer = chan1.Consumer(self.queue)
+        self.purge_consumer(consumer)
+        producer = chan1.Producer(self.exchange)
+        producer.publish({'foo': 'bar'}, routing_key=self.prefix)
+        message = consumeN(self.connection, consumer)
+        self.assertDictEqual(message[0], {'foo': 'bar'})
+        chan1.close()
+        self.purge([self.queue.name])
+
+    def test_purge(self):
+        if not self.verify_alive():
+            return
+        chan1 = self.connection.channel()
+        consumer = chan1.Consumer(self.queue)
+        self.purge_consumer(consumer)
+
+        producer = chan1.Producer(self.exchange)
+        for i in range(10):
+            producer.publish({'foo': 'bar'}, routing_key=self.prefix)
+        if self.reliable_purge:
+            self.assertEqual(consumer.purge(), 10)
+            self.assertEqual(consumer.purge(), 0)
+        else:
+            purged = 0
+            while purged < 9:
+                purged += self.purge_consumer(consumer)
+
+    def _digest(self, data):
+        return _digest(data).hexdigest()
+
+    @skip_if_quick
+    def test_produce__consume_large_messages(
+            self, bytes=1048576, n=10,
+            charset=string.punctuation + string.letters + string.digits):
+        if not self.verify_alive():
+            return
+        bytes = min(x for x in [bytes, self.message_size_limit] if x)
+        messages = [''.join(random.choice(charset)
+                    for j in range(bytes)) + '--%s' % n
+                    for i in range(n)]
+        digests = []
+        chan1 = self.connection.channel()
+        consumer = chan1.Consumer(self.queue)
+        self.purge_consumer(consumer)
+        producer = chan1.Producer(self.exchange)
+        for i, message in enumerate(messages):
+            producer.publish({'text': message,
+                              'i': i}, routing_key=self.prefix)
+            digests.append(self._digest(message))
+
+        received = [(msg['i'], msg['text'])
+                    for msg in consumeN(self.connection, consumer, n)]
+        self.assertEqual(len(received), n)
+        ordering = [i for i, _ in received]
+        if ordering != list(range(n)) and not self.suppress_disorder_warning:
+            warnings.warn(
'%s did not deliver messages in FIFO order: %r' % ( + self.transport, ordering)) + + for i, text in received: + if text != messages[i]: + raise AssertionError('%i: %r is not %r' % ( + i, text[-100:], messages[i][-100:])) + self.assertEqual(self._digest(text), digests[i]) + + chan1.close() + self.purge([self.queue.name]) + + def P(self, rest): + return '%s%s%s' % (self.prefix, self.sep, rest) + + def test_produce__consume_multiple(self): + if not self.verify_alive(): + return + chan1 = self.connection.channel() + producer = chan1.Producer(self.exchange) + b1 = Queue(self.P('b1'), self.exchange, 'b1')(chan1) + b2 = Queue(self.P('b2'), self.exchange, 'b2')(chan1) + b3 = Queue(self.P('b3'), self.exchange, 'b3')(chan1) + [q.declare() for q in (b1, b2, b3)] + self.purge([b1.name, b2.name, b3.name]) + + producer.publish('b1', routing_key='b1') + producer.publish('b2', routing_key='b2') + producer.publish('b3', routing_key='b3') + chan1.close() + + chan2 = self.connection.channel() + consumer = chan2.Consumer([b1, b2, b3]) + messages = consumeN(self.connection, consumer, 3) + self.assertItemsEqual(_nobuf(messages), ['b1', 'b2', 'b3']) + chan2.close() + self.purge([self.P('b1'), self.P('b2'), self.P('b3')]) + + def test_timeout(self): + if not self.verify_alive(): + return + chan = self.connection.channel() + self.purge([self.queue.name]) + consumer = chan.Consumer(self.queue) + self.assertRaises( + socket.timeout, self.connection.drain_events, timeout=0.3, + ) + consumer.cancel() + chan.close() + + def test_basic_get(self): + if not self.verify_alive(): + return + chan1 = self.connection.channel() + producer = chan1.Producer(self.exchange) + chan2 = self.connection.channel() + queue = Queue(self.P('basic_get'), self.exchange, 'basic_get') + queue = queue(chan2) + queue.declare() + producer.publish({'basic.get': 'this'}, routing_key='basic_get') + chan1.close() + + for i in range(self.event_loop_max): + m = queue.get() + if m: + break + time.sleep(0.1) + self.assertEqual(m.payload, {'basic.get': 'this'}) + self.purge([queue.name]) + chan2.close() + + def test_cyclic_reference_transport(self): + if not self.verify_alive(): + return + + def _createref(): + conn = self.get_connection() + conn.transport + conn.close() + return weakref.ref(conn) + + self.assertIsNone(_createref()()) + + def test_cyclic_reference_connection(self): + if not self.verify_alive(): + return + + def _createref(): + conn = self.get_connection() + conn.connect() + conn.close() + return weakref.ref(conn) + + self.assertIsNone(_createref()()) + + def test_cyclic_reference_channel(self): + if not self.verify_alive(): + return + + def _createref(): + conn = self.get_connection() + conn.connect() + chanrefs = [] + try: + for i in range(100): + channel = conn.channel() + chanrefs.append(weakref.ref(channel)) + channel.close() + finally: + conn.close() + return chanrefs + + for chanref in _createref(): + self.assertIsNone(chanref()) + + def tearDown(self): + if self.transport and self.connected: + self.connection.close() diff --git a/kombu.egg-info/PKG-INFO b/kombu.egg-info/PKG-INFO new file mode 100644 index 0000000..88e69c4 --- /dev/null +++ b/kombu.egg-info/PKG-INFO @@ -0,0 +1,355 @@ +Metadata-Version: 1.1 +Name: kombu +Version: 3.0.21 +Summary: Messaging library for Python +Home-page: http://kombu.readthedocs.org +Author: Ask Solem +Author-email: ask@celeryproject.org +License: UNKNOWN +Description: .. 
_kombu-index:
+
+        ========================================
+         kombu - Messaging library for Python
+        ========================================
+
+        :Version: 3.0.21
+
+        `Kombu` is a messaging library for Python.
+
+        The aim of `Kombu` is to make messaging in Python as easy as possible by
+        providing an idiomatic high-level interface for the AMQ protocol, and also
+        to provide proven and tested solutions to common messaging problems.
+
+        `AMQP`_ is the Advanced Message Queuing Protocol, an open standard protocol
+        for message orientation, queuing, routing, reliability and security,
+        for which the `RabbitMQ`_ messaging server is the most popular implementation.
+
+        Features
+        ========
+
+        * Allows application authors to support several message server
+          solutions by using pluggable transports.
+
+            * AMQP transport using the `py-amqp`_ or `librabbitmq`_ client libraries.
+
+            * High performance AMQP transport written in C - when using `librabbitmq`_
+
+              This is automatically enabled if librabbitmq is installed::
+
+                  $ pip install librabbitmq
+
+            * Virtual transports make it really easy to add support for non-AMQP
+              transports. There is already built-in support for `Redis`_,
+              `Beanstalk`_, `Amazon SQS`_, `CouchDB`_, `MongoDB`_, `ZeroMQ`_,
+              `ZooKeeper`_, `SoftLayer MQ`_ and `Pyro`_.
+
+            * You can also use the SQLAlchemy and Django ORM transports to
+              use a database as the broker.
+
+            * In-memory transport for unit testing.
+
+        * Supports automatic encoding, serialization and compression of message
+          payloads.
+
+        * Consistent exception handling across transports.
+
+        * The ability to ensure that an operation is performed by gracefully
+          handling connection and channel errors.
+
+        * Several annoyances with `amqplib`_ have been fixed, like supporting
+          timeouts and the ability to wait for events on more than one channel.
+
+        * Projects already using `carrot`_ can easily be ported by using
+          a compatibility layer.
+
+        For an introduction to AMQP you should read the article `Rabbits and warrens`_,
+        and the `Wikipedia article about AMQP`_.
+
+        .. _`RabbitMQ`: http://www.rabbitmq.com/
+        .. _`AMQP`: http://amqp.org
+        .. _`py-amqp`: http://pypi.python.org/pypi/amqp/
+        .. _`Redis`: http://code.google.com/p/redis/
+        .. _`Amazon SQS`: http://aws.amazon.com/sqs/
+        .. _`MongoDB`: http://www.mongodb.org/
+        .. _`CouchDB`: http://couchdb.apache.org/
+        .. _`ZeroMQ`: http://zeromq.org/
+        .. _`Zookeeper`: https://zookeeper.apache.org/
+        .. _`Beanstalk`: http://kr.github.com/beanstalkd/
+        .. _`Rabbits and warrens`: http://blogs.digitar.com/jjww/2009/01/rabbits-and-warrens/
+        .. _`amqplib`: http://barryp.org/software/py-amqplib/
+        .. _`Wikipedia article about AMQP`: http://en.wikipedia.org/wiki/AMQP
+        .. _`carrot`: http://pypi.python.org/pypi/carrot/
+        .. _`librabbitmq`: http://pypi.python.org/pypi/librabbitmq
+        .. _`Pyro`: http://pythonhosting.org/Pyro
+        .. _`SoftLayer MQ`: http://www.softlayer.com/services/additional/message-queue
+
+
+        ..
_transport-comparison:
+
+        Transport Comparison
+        ====================
+
+        +---------------+----------+------------+------------+---------------+
+        | **Client**    | **Type** | **Direct** | **Topic**  | **Fanout**    |
+        +---------------+----------+------------+------------+---------------+
+        | *amqp*        | Native   | Yes        | Yes        | Yes           |
+        +---------------+----------+------------+------------+---------------+
+        | *redis*       | Virtual  | Yes        | Yes        | Yes (PUB/SUB) |
+        +---------------+----------+------------+------------+---------------+
+        | *mongodb*     | Virtual  | Yes        | Yes        | Yes           |
+        +---------------+----------+------------+------------+---------------+
+        | *beanstalk*   | Virtual  | Yes        | Yes [#f1]_ | No            |
+        +---------------+----------+------------+------------+---------------+
+        | *SQS*         | Virtual  | Yes        | Yes [#f1]_ | Yes [#f2]_    |
+        +---------------+----------+------------+------------+---------------+
+        | *couchdb*     | Virtual  | Yes        | Yes [#f1]_ | No            |
+        +---------------+----------+------------+------------+---------------+
+        | *zookeeper*   | Virtual  | Yes        | Yes [#f1]_ | No            |
+        +---------------+----------+------------+------------+---------------+
+        | *in-memory*   | Virtual  | Yes        | Yes [#f1]_ | No            |
+        +---------------+----------+------------+------------+---------------+
+        | *django*      | Virtual  | Yes        | Yes [#f1]_ | No            |
+        +---------------+----------+------------+------------+---------------+
+        | *sqlalchemy*  | Virtual  | Yes        | Yes [#f1]_ | No            |
+        +---------------+----------+------------+------------+---------------+
+        | *SLMQ*        | Virtual  | Yes        | Yes [#f1]_ | No            |
+        +---------------+----------+------------+------------+---------------+
+
+
+        .. [#f1] Declarations only kept in memory, so exchanges/queues
+           must be declared by all clients that need them.
+
+        .. [#f2] Fanout supported via storing routing tables in SimpleDB.
+           Disabled by default, but can be enabled by using the
+           ``supports_fanout`` transport option.
+
+
+        Documentation
+        -------------
+
+        Kombu uses Sphinx, and the latest documentation can be found here:
+
+            http://kombu.readthedocs.org/
+
+        Quick overview
+        --------------
+
+        ::
+
+            from kombu import Connection, Exchange, Queue
+
+            media_exchange = Exchange('media', 'direct', durable=True)
+            video_queue = Queue('video', exchange=media_exchange, routing_key='video')
+
+            def process_media(body, message):
+                print(body)
+                message.ack()
+
+            # connections
+            with Connection('amqp://guest:guest@localhost//') as conn:
+
+                # produce
+                producer = conn.Producer(serializer='json')
+                producer.publish({'name': '/tmp/lolcat1.avi', 'size': 1301013},
+                                 exchange=media_exchange, routing_key='video',
+                                 declare=[video_queue])
+
+                # the declare above makes sure the video queue is declared
+                # so that the messages can be delivered.
+                # It's a best practice in Kombu to have both publishers and
+                # consumers declare the queue. You can also declare the
+                # queue manually using:
+                #     video_queue(conn).declare()
+
+                # consume
+                with conn.Consumer(video_queue, callbacks=[process_media]) as consumer:
+                    # Process messages and handle events on all channels
+                    while True:
+                        conn.drain_events()
+
+            # Consume from several queues on the same channel:
+            video_queue = Queue('video', exchange=media_exchange, routing_key='video')
+            image_queue = Queue('image', exchange=media_exchange, routing_key='image')
+
+            with connection.Consumer([video_queue, image_queue],
+                                     callbacks=[process_media]) as consumer:
+                while True:
+                    connection.drain_events()
+
+
+        Or handle channels manually::
+
+            with connection.channel() as channel:
+                producer = Producer(channel, ...)
+                consumer = Consumer(channel, ...)
+
+
+        All objects can be used outside of with statements too,
+        just remember to close the objects after use::
+
+            from kombu import Connection, Consumer, Producer
+
+            connection = Connection()
+            # ...
+            connection.release()
+
+            consumer = Consumer(channel_or_connection, ...)
+            consumer.register_callback(my_callback)
+            consumer.consume()
+            # ....
+            consumer.cancel()
+
+
+        `Exchange` and `Queue` are simply declarations that can be pickled
+        and used in configuration files etc.
+
+        They also support operations, but to do so they need to be bound
+        to a channel.
+
+        Binding exchanges and queues to a connection will make it use
+        that connection's default channel.
+
+        ::
+
+            >>> exchange = Exchange('tasks', 'direct')
+
+            >>> connection = Connection()
+            >>> bound_exchange = exchange(connection)
+            >>> bound_exchange.delete()
+
+            # the original exchange is not affected, and stays unbound.
+            >>> exchange.delete()
+            raise NotBoundError: Can't call delete on Exchange not bound to
+                a channel.
+
+        Installation
+        ============
+
+        You can install `Kombu` either via the Python Package Index (PyPI)
+        or from source.
+
+        To install using `pip`,::
+
+            $ pip install kombu
+
+        To install using `easy_install`,::
+
+            $ easy_install kombu
+
+        If you have downloaded a source tarball you can install it
+        by doing the following,::
+
+            $ python setup.py build
+            # python setup.py install # as root
+
+
+        Terminology
+        ===========
+
+        There are some concepts you should be familiar with before starting:
+
+        * Producers
+
+          Producers send messages to an exchange.
+
+        * Exchanges
+
+          Messages are sent to exchanges. Exchanges are named and can be
+          configured to use one of several routing algorithms. The exchange
+          routes the messages to consumers by matching the routing key in the
+          message with the routing key the consumer provides when binding to
+          the exchange.
+
+        * Consumers
+
+          Consumers declare a queue, bind it to an exchange and receive
+          messages from it.
+
+        * Queues
+
+          Queues receive messages sent to exchanges. The queues are declared
+          by consumers.
+
+        * Routing keys
+
+          Every message has a routing key. The interpretation of the routing
+          key depends on the exchange type. There are four default exchange
+          types defined by the AMQP standard, and vendors can define custom
+          types (see your vendor's manual for details).
+
+          These are the default exchange types defined by AMQP/0.8:
+
+          * Direct exchange
+
+            Matches if the routing key property of the message and
+            the `routing_key` attribute of the consumer are identical.
+
+          * Fan-out exchange
+
+            Always matches, even if the binding does not have a routing
+            key.
+
+          * Topic exchange
+
+            Matches the routing key property of the message by a primitive
+            pattern matching scheme. The message routing key then consists
+            of words separated by dots (`"."`, like domain names), and
+            two special characters are available: star (`"*"`) and hash
+            (`"#"`). The star matches any word, and the hash matches
+            zero or more words. For example `"*.stock.#"` matches the
+            routing keys `"usd.stock"` and `"eur.stock.db"` but not
+            `"stock.nasdaq"`.
+
+        Getting Help
+        ============
+
+        Mailing list
+        ------------
+
+        Join the `carrot-users`_ mailing list.
+
+        ..
_`carrot-users`: http://groups.google.com/group/carrot-users/ + + Bug tracker + =========== + + If you have any suggestions, bug reports or annoyances please report them + to our issue tracker at http://github.com/celery/kombu/issues/ + + Contributing + ============ + + Development of `Kombu` happens at Github: http://github.com/celery/kombu + + You are highly encouraged to participate in the development. If you don't + like Github (for some reason) you're welcome to send regular patches. + + License + ======= + + This software is licensed under the `New BSD License`. See the `LICENSE` + file in the top distribution directory for the full license text. + + .. image:: https://d2weczhvl823v0.cloudfront.net/celery/kombu/trend.png + :alt: Bitdeli badge + :target: https://bitdeli.com/free + +Platform: any +Classifier: Development Status :: 5 - Production/Stable +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Programming Language :: Python :: Implementation :: Jython +Classifier: Intended Audience :: Developers +Classifier: Topic :: Communications +Classifier: Topic :: System :: Distributed Computing +Classifier: Topic :: System :: Networking +Classifier: Topic :: Software Development :: Libraries :: Python Modules diff --git a/kombu.egg-info/SOURCES.txt b/kombu.egg-info/SOURCES.txt new file mode 100644 index 0000000..4726192 --- /dev/null +++ b/kombu.egg-info/SOURCES.txt @@ -0,0 +1,266 @@ +AUTHORS +Changelog +FAQ +INSTALL +LICENSE +MANIFEST.in +README.rst +THANKS +TODO +setup.cfg +setup.py +docs/Makefile +docs/changelog.rst +docs/conf.py +docs/faq.rst +docs/index.rst +docs/introduction.rst +docs/.static/.keep +docs/.templates/sidebarintro.html +docs/.templates/sidebarlogo.html +docs/_ext/applyxrefs.py +docs/_ext/literals_to_xrefs.py +docs/_theme/celery/theme.conf +docs/_theme/celery/static/celery.css_t +docs/images/kombu.jpg +docs/images/kombusmall.jpg +docs/reference/index.rst +docs/reference/kombu.abstract.rst +docs/reference/kombu.async.debug.rst +docs/reference/kombu.async.hub.rst +docs/reference/kombu.async.rst +docs/reference/kombu.async.semaphore.rst +docs/reference/kombu.async.timer.rst +docs/reference/kombu.clocks.rst +docs/reference/kombu.common.rst +docs/reference/kombu.compat.rst +docs/reference/kombu.compression.rst +docs/reference/kombu.connection.rst +docs/reference/kombu.exceptions.rst +docs/reference/kombu.five.rst +docs/reference/kombu.log.rst +docs/reference/kombu.message.rst +docs/reference/kombu.mixins.rst +docs/reference/kombu.pidbox.rst +docs/reference/kombu.pools.rst +docs/reference/kombu.rst +docs/reference/kombu.serialization.rst +docs/reference/kombu.simple.rst +docs/reference/kombu.syn.rst +docs/reference/kombu.transport.SLMQ.rst +docs/reference/kombu.transport.SQS.rst +docs/reference/kombu.transport.amqplib.rst +docs/reference/kombu.transport.base.rst +docs/reference/kombu.transport.beanstalk.rst +docs/reference/kombu.transport.couchdb.rst +docs/reference/kombu.transport.django.management.commands.clean_kombu_messages.rst 
+docs/reference/kombu.transport.django.managers.rst +docs/reference/kombu.transport.django.models.rst +docs/reference/kombu.transport.django.rst +docs/reference/kombu.transport.filesystem.rst +docs/reference/kombu.transport.librabbitmq.rst +docs/reference/kombu.transport.memory.rst +docs/reference/kombu.transport.mongodb.rst +docs/reference/kombu.transport.pyamqp.rst +docs/reference/kombu.transport.pyro.rst +docs/reference/kombu.transport.redis.rst +docs/reference/kombu.transport.rst +docs/reference/kombu.transport.sqlalchemy.models.rst +docs/reference/kombu.transport.sqlalchemy.rst +docs/reference/kombu.transport.virtual.exchange.rst +docs/reference/kombu.transport.virtual.rst +docs/reference/kombu.transport.virtual.scheduling.rst +docs/reference/kombu.transport.zmq.rst +docs/reference/kombu.transport.zookeeper.rst +docs/reference/kombu.utils.amq_manager.rst +docs/reference/kombu.utils.compat.rst +docs/reference/kombu.utils.debug.rst +docs/reference/kombu.utils.encoding.rst +docs/reference/kombu.utils.eventio.rst +docs/reference/kombu.utils.functional.rst +docs/reference/kombu.utils.limits.rst +docs/reference/kombu.utils.rst +docs/reference/kombu.utils.text.rst +docs/reference/kombu.utils.url.rst +docs/userguide/connections.rst +docs/userguide/consumers.rst +docs/userguide/examples.rst +docs/userguide/index.rst +docs/userguide/introduction.rst +docs/userguide/pools.rst +docs/userguide/producers.rst +docs/userguide/serialization.rst +docs/userguide/simple.rst +examples/complete_receive.py +examples/complete_send.py +examples/hello_consumer.py +examples/hello_publisher.py +examples/simple_eventlet_receive.py +examples/simple_eventlet_send.py +examples/simple_receive.py +examples/simple_send.py +examples/experimental/async_consume.py +examples/simple_task_queue/__init__.py +examples/simple_task_queue/client.py +examples/simple_task_queue/queues.py +examples/simple_task_queue/tasks.py +examples/simple_task_queue/worker.py +extra/doc2ghpages +extra/release/bump_version.py +extra/release/doc4allmods +extra/release/flakeplus.py +extra/release/removepyc.sh +extra/release/verify-reference-index.sh +funtests/__init__.py +funtests/setup.cfg +funtests/setup.py +funtests/transport.py +funtests/tests/__init__.py +funtests/tests/test_SLMQ.py +funtests/tests/test_SQS.py +funtests/tests/test_amqp.py +funtests/tests/test_amqplib.py +funtests/tests/test_beanstalk.py +funtests/tests/test_couchdb.py +funtests/tests/test_django.py +funtests/tests/test_librabbitmq.py +funtests/tests/test_mongodb.py +funtests/tests/test_pyamqp.py +funtests/tests/test_redis.py +funtests/tests/test_sqla.py +funtests/tests/test_zookeeper.py +kombu/__init__.py +kombu/abstract.py +kombu/clocks.py +kombu/common.py +kombu/compat.py +kombu/compression.py +kombu/connection.py +kombu/entity.py +kombu/exceptions.py +kombu/five.py +kombu/log.py +kombu/message.py +kombu/messaging.py +kombu/mixins.py +kombu/pidbox.py +kombu/pools.py +kombu/serialization.py +kombu/simple.py +kombu/syn.py +kombu.egg-info/PKG-INFO +kombu.egg-info/SOURCES.txt +kombu.egg-info/dependency_links.txt +kombu.egg-info/not-zip-safe +kombu.egg-info/requires.txt +kombu.egg-info/top_level.txt +kombu/async/__init__.py +kombu/async/debug.py +kombu/async/hub.py +kombu/async/semaphore.py +kombu/async/timer.py +kombu/tests/__init__.py +kombu/tests/case.py +kombu/tests/mocks.py +kombu/tests/test_clocks.py +kombu/tests/test_common.py +kombu/tests/test_compat.py +kombu/tests/test_compression.py +kombu/tests/test_connection.py +kombu/tests/test_entities.py +kombu/tests/test_log.py 
+kombu/tests/test_messaging.py +kombu/tests/test_mixins.py +kombu/tests/test_pidbox.py +kombu/tests/test_pools.py +kombu/tests/test_serialization.py +kombu/tests/test_simple.py +kombu/tests/test_syn.py +kombu/tests/async/__init__.py +kombu/tests/async/test_hub.py +kombu/tests/async/test_semaphore.py +kombu/tests/transport/__init__.py +kombu/tests/transport/test_SQS.py +kombu/tests/transport/test_amqplib.py +kombu/tests/transport/test_base.py +kombu/tests/transport/test_filesystem.py +kombu/tests/transport/test_librabbitmq.py +kombu/tests/transport/test_memory.py +kombu/tests/transport/test_mongodb.py +kombu/tests/transport/test_pyamqp.py +kombu/tests/transport/test_redis.py +kombu/tests/transport/test_sqlalchemy.py +kombu/tests/transport/test_transport.py +kombu/tests/transport/virtual/__init__.py +kombu/tests/transport/virtual/test_base.py +kombu/tests/transport/virtual/test_exchange.py +kombu/tests/transport/virtual/test_scheduling.py +kombu/tests/utils/__init__.py +kombu/tests/utils/test_amq_manager.py +kombu/tests/utils/test_debug.py +kombu/tests/utils/test_encoding.py +kombu/tests/utils/test_functional.py +kombu/tests/utils/test_utils.py +kombu/transport/SLMQ.py +kombu/transport/SQS.py +kombu/transport/__init__.py +kombu/transport/amqplib.py +kombu/transport/base.py +kombu/transport/beanstalk.py +kombu/transport/couchdb.py +kombu/transport/filesystem.py +kombu/transport/librabbitmq.py +kombu/transport/memory.py +kombu/transport/mongodb.py +kombu/transport/pyamqp.py +kombu/transport/pyro.py +kombu/transport/redis.py +kombu/transport/zmq.py +kombu/transport/zookeeper.py +kombu/transport/django/__init__.py +kombu/transport/django/managers.py +kombu/transport/django/models.py +kombu/transport/django/management/__init__.py +kombu/transport/django/management/commands/__init__.py +kombu/transport/django/management/commands/clean_kombu_messages.py +kombu/transport/django/migrations/0001_initial.py +kombu/transport/django/migrations/__init__.py +kombu/transport/sqlalchemy/__init__.py +kombu/transport/sqlalchemy/models.py +kombu/transport/virtual/__init__.py +kombu/transport/virtual/exchange.py +kombu/transport/virtual/scheduling.py +kombu/utils/__init__.py +kombu/utils/amq_manager.py +kombu/utils/compat.py +kombu/utils/debug.py +kombu/utils/encoding.py +kombu/utils/eventio.py +kombu/utils/functional.py +kombu/utils/limits.py +kombu/utils/text.py +kombu/utils/url.py +requirements/default.txt +requirements/dev.txt +requirements/docs.txt +requirements/funtest.txt +requirements/pkgutils.txt +requirements/py26.txt +requirements/test-ci.txt +requirements/test-ci3.txt +requirements/test.txt +requirements/test3.txt +requirements/extras/beanstalk.txt +requirements/extras/couchdb.txt +requirements/extras/kazoo.txt +requirements/extras/librabbitmq.txt +requirements/extras/mongodb.txt +requirements/extras/msgpack.txt +requirements/extras/pyro.txt +requirements/extras/redis.txt +requirements/extras/slmq.txt +requirements/extras/sqlalchemy.txt +requirements/extras/sqs.txt +requirements/extras/yaml.txt +requirements/extras/zeromq.txt +requirements/extras/zookeeper.txt \ No newline at end of file diff --git a/kombu.egg-info/dependency_links.txt b/kombu.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/kombu.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/kombu.egg-info/not-zip-safe b/kombu.egg-info/not-zip-safe new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/kombu.egg-info/not-zip-safe @@ -0,0 +1 @@ + diff --git 
a/kombu.egg-info/requires.txt b/kombu.egg-info/requires.txt new file mode 100644 index 0000000..44783ad --- /dev/null +++ b/kombu.egg-info/requires.txt @@ -0,0 +1,45 @@ +anyjson>=0.3.3 +amqp>=1.4.5,<2.0 + +[sqlalchemy] +sqlalchemy + +[librabbitmq] +librabbitmq>=1.5.2 + +[sqs] +boto>=2.13.3 + +[mongodb] +pymongo>=2.6.2 + +[zookeeper] +kazoo>=1.3.1 + +[beanstalk] +beanstalkc + +[slmq] +softlayer_messaging>=1.0.3 + +[msgpack] +msgpack-python>=0.3.0 + +[couchdb] +couchdb + +[redis] +redis>=2.8.0 + +[:python_version=="2.6"] +importlib +ordereddict + +[pyro] +pyro4 + +[yaml] +PyYAML>=3.10 + +[zeromq] +pyzmq>=13.1.0 \ No newline at end of file diff --git a/kombu.egg-info/top_level.txt b/kombu.egg-info/top_level.txt new file mode 100644 index 0000000..3a8f969 --- /dev/null +++ b/kombu.egg-info/top_level.txt @@ -0,0 +1 @@ +kombu diff --git a/kombu/__init__.py b/kombu/__init__.py new file mode 100644 index 0000000..9825e32 --- /dev/null +++ b/kombu/__init__.py @@ -0,0 +1,108 @@ +"""Messaging library for Python""" +from __future__ import absolute_import + +from collections import namedtuple + +version_info_t = namedtuple( + 'version_info_t', ('major', 'minor', 'micro', 'releaselevel', 'serial'), +) + +VERSION = version_info_t(3, 0, 21, '', '') +__version__ = '{0.major}.{0.minor}.{0.micro}{0.releaselevel}'.format(VERSION) +__author__ = 'Ask Solem' +__contact__ = 'ask@celeryproject.org' +__homepage__ = 'http://kombu.readthedocs.org' +__docformat__ = 'restructuredtext en' + +# -eof meta- + +import os +import sys + +if sys.version_info < (2, 6): # pragma: no cover + raise Exception('Kombu 3.1 requires Python versions 2.6 or later.') + +STATICA_HACK = True +globals()['kcah_acitats'[::-1].upper()] = False +if STATICA_HACK: # pragma: no cover + # This is never executed, but tricks static analyzers (PyDev, PyCharm, + # pylint, etc.) into knowing the types of these symbols, and what + # they contain. + from kombu.connection import Connection, BrokerConnection # noqa + from kombu.entity import Exchange, Queue, binding # noqa + from kombu.messaging import Consumer, Producer # noqa + from kombu.pools import connections, producers # noqa + from kombu.utils.url import parse_url # noqa + from kombu.common import eventloop, uuid # noqa + from kombu.serialization import ( # noqa + enable_insecure_serializers, + disable_insecure_serializers, + ) + +# Lazy loading. +# - See werkzeug/__init__.py for the rationale behind this. 
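+# A sketch of the effect, as an editor's illustration (not part of the
+# original module):
+#
+#     import kombu             # cheap; no submodule is imported yet
+#     kombu.Connection         # first access imports kombu.connection
+#                              # and caches the attribute on the package
+#
+# i.e. the names in ``all_by_module`` below resolve on first use rather
+# than at package import time.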
+from types import ModuleType + +all_by_module = { + 'kombu.connection': ['Connection', 'BrokerConnection'], + 'kombu.entity': ['Exchange', 'Queue', 'binding'], + 'kombu.messaging': ['Consumer', 'Producer'], + 'kombu.pools': ['connections', 'producers'], + 'kombu.utils.url': ['parse_url'], + 'kombu.common': ['eventloop', 'uuid'], + 'kombu.serialization': ['enable_insecure_serializers', + 'disable_insecure_serializers'], +} + +object_origins = {} +for module, items in all_by_module.items(): + for item in items: + object_origins[item] = module + + +class module(ModuleType): + + def __getattr__(self, name): + if name in object_origins: + module = __import__(object_origins[name], None, None, [name]) + for extra_name in all_by_module[module.__name__]: + setattr(self, extra_name, getattr(module, extra_name)) + return getattr(module, name) + return ModuleType.__getattribute__(self, name) + + def __dir__(self): + result = list(new_module.__all__) + result.extend(('__file__', '__path__', '__doc__', '__all__', + '__docformat__', '__name__', '__path__', 'VERSION', + '__package__', '__version__', '__author__', + '__contact__', '__homepage__', '__docformat__')) + return result + +# 2.5 does not define __package__ +try: + package = __package__ +except NameError: # pragma: no cover + package = 'kombu' + +# keep a reference to this module so that it's not garbage collected +old_module = sys.modules[__name__] + +new_module = sys.modules[__name__] = module(__name__) +new_module.__dict__.update({ + '__file__': __file__, + '__path__': __path__, + '__doc__': __doc__, + '__all__': tuple(object_origins), + '__version__': __version__, + '__author__': __author__, + '__contact__': __contact__, + '__homepage__': __homepage__, + '__docformat__': __docformat__, + '__package__': package, + 'version_info_t': version_info_t, + 'VERSION': VERSION}) + +if os.environ.get('KOMBU_LOG_DEBUG'): # pragma: no cover + os.environ.update(KOMBU_LOG_CHANNEL='1', KOMBU_LOG_CONNECTION='1') + from .utils import debug + debug.setup_logging() diff --git a/kombu/abstract.py b/kombu/abstract.py new file mode 100644 index 0000000..6dff848 --- /dev/null +++ b/kombu/abstract.py @@ -0,0 +1,116 @@ +""" +kombu.abstract +============== + +Object utilities. 
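+
+A sketch of the bind workflow implemented below (editor's illustration;
+:class:`~kombu.Queue` is a :class:`MaybeChannelBound` subclass and
+``channel`` is assumed to be an open channel)::
+
+    queue = Queue('tasks')   # unbound; .channel raises NotBoundError
+    bound = queue(channel)   # __call__ -> bind() -> bound copy
+    bound.is_bound           # True -- the original stays unbound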
+ +""" +from __future__ import absolute_import + +from copy import copy + +from .connection import maybe_channel +from .exceptions import NotBoundError +from .utils import ChannelPromise + +__all__ = ['Object', 'MaybeChannelBound'] + + +def unpickle_dict(cls, kwargs): + return cls(**kwargs) + + +class Object(object): + """Common base class supporting automatic kwargs->attributes handling, + and cloning.""" + attrs = () + + def __init__(self, *args, **kwargs): + any = lambda v: v + for name, type_ in self.attrs: + value = kwargs.get(name) + if value is not None: + setattr(self, name, (type_ or any)(value)) + else: + try: + getattr(self, name) + except AttributeError: + setattr(self, name, None) + + def as_dict(self, recurse=False): + def f(obj, type): + if recurse and isinstance(obj, Object): + return obj.as_dict(recurse=True) + return type(obj) if type else obj + return dict( + (attr, f(getattr(self, attr), type)) for attr, type in self.attrs + ) + + def __reduce__(self): + return unpickle_dict, (self.__class__, self.as_dict()) + + def __copy__(self): + return self.__class__(**self.as_dict()) + + +class MaybeChannelBound(Object): + """Mixin for classes that can be bound to an AMQP channel.""" + _channel = None + _is_bound = False + + #: Defines whether maybe_declare can skip declaring this entity twice. + can_cache_declaration = False + + def __call__(self, channel): + """`self(channel) -> self.bind(channel)`""" + return self.bind(channel) + + def bind(self, channel): + """Create copy of the instance that is bound to a channel.""" + return copy(self).maybe_bind(channel) + + def maybe_bind(self, channel): + """Bind instance to channel if not already bound.""" + if not self.is_bound and channel: + self._channel = maybe_channel(channel) + self.when_bound() + self._is_bound = True + return self + + def revive(self, channel): + """Revive channel after the connection has been re-established. + + Used by :meth:`~kombu.Connection.ensure`. + + """ + if self.is_bound: + self._channel = channel + self.when_bound() + + def when_bound(self): + """Callback called when the class is bound.""" + pass + + def __repr__(self, item=''): + item = item or type(self).__name__ + if self.is_bound: + return '<{0} bound to chan:{1}>'.format( + item or type(self).__name__, self.channel.channel_id) + return ''.format(item) + + @property + def is_bound(self): + """Flag set if the channel is bound.""" + return self._is_bound and self._channel is not None + + @property + def channel(self): + """Current channel if the object is bound.""" + channel = self._channel + if channel is None: + raise NotBoundError( + "Can't call method on {0} not bound to a channel".format( + type(self).__name__)) + if isinstance(channel, ChannelPromise): + channel = self._channel = channel() + return channel diff --git a/kombu/async/__init__.py b/kombu/async/__init__.py new file mode 100644 index 0000000..c6e8e8e --- /dev/null +++ b/kombu/async/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +""" +kombu.async +=========== + +Event loop implementation. 
+ +""" +from __future__ import absolute_import + +from .hub import Hub, get_event_loop, set_event_loop + +from kombu.utils.eventio import READ, WRITE, ERR + +__all__ = ['READ', 'WRITE', 'ERR', 'Hub', 'get_event_loop', 'set_event_loop'] diff --git a/kombu/async/debug.py b/kombu/async/debug.py new file mode 100644 index 0000000..80cdcb7 --- /dev/null +++ b/kombu/async/debug.py @@ -0,0 +1,60 @@ +from __future__ import absolute_import + +from kombu.five import items +from kombu.utils import reprcall +from kombu.utils.eventio import READ, WRITE, ERR + + +def repr_flag(flag): + return '{0}{1}{2}'.format('R' if flag & READ else '', + 'W' if flag & WRITE else '', + '!' if flag & ERR else '') + + +def _rcb(obj): + if obj is None: + return '' + if isinstance(obj, str): + return obj + if isinstance(obj, tuple): + cb, args = obj + return reprcall(cb.__name__, args=args) + return obj.__name__ + + +def repr_active(h): + return ', '.join(repr_readers(h) + repr_writers(h)) + + +def repr_events(h, events): + return ', '.join( + '{0}({1})->{2}'.format( + _rcb(callback_for(h, fd, fl, '(GONE)')), fd, + repr_flag(fl), + ) + for fd, fl in events + ) + + +def repr_readers(h): + return ['({0}){1}->{2}'.format(fd, _rcb(cb), repr_flag(READ | ERR)) + for fd, cb in items(h.readers)] + + +def repr_writers(h): + return ['({0}){1}->{2}'.format(fd, _rcb(cb), repr_flag(WRITE)) + for fd, cb in items(h.writers)] + + +def callback_for(h, fd, flag, *default): + try: + if flag & READ: + return h.readers[fd] + if flag & WRITE: + if fd in h.consolidate: + return h.consolidate_callback + return h.writers[fd] + except KeyError: + if default: + return default[0] + raise diff --git a/kombu/async/hub.py b/kombu/async/hub.py new file mode 100644 index 0000000..cea77dd --- /dev/null +++ b/kombu/async/hub.py @@ -0,0 +1,350 @@ +# -*- coding: utf-8 -*- +""" +kombu.async.hub +=============== + +Event loop implementation. + +""" +from __future__ import absolute_import + +import errno + +from collections import deque +from contextlib import contextmanager +from time import sleep +from types import GeneratorType as generator + +from amqp import promise + +from kombu.five import Empty, range +from kombu.log import get_logger +from kombu.utils import cached_property, fileno +from kombu.utils.compat import get_errno +from kombu.utils.eventio import READ, WRITE, ERR, poll + +from .timer import Timer + +__all__ = ['Hub', 'get_event_loop', 'set_event_loop'] +logger = get_logger(__name__) + +_current_loop = None + + +class Stop(BaseException): + """Stops the event loop.""" + + +def _raise_stop_error(): + raise Stop() + + +@contextmanager +def _dummy_context(*args, **kwargs): + yield + + +def get_event_loop(): + return _current_loop + + +def set_event_loop(loop): + global _current_loop + _current_loop = loop + return loop + + +class Hub(object): + """Event loop object. + + :keyword timer: Specify timer object. + + """ + #: Flag set if reading from an fd will not block. + READ = READ + + #: Flag set if writing to an fd will not block. + WRITE = WRITE + + #: Flag set on error, and the fd should be read from asap. + ERR = ERR + + #: List of callbacks to be called when the loop is exiting, + #: applied with the hub instance as sole argument. 
on_close = None + + def __init__(self, timer=None): + self.timer = timer if timer is not None else Timer() + + self.readers = {} + self.writers = {} + self.on_tick = set() + self.on_close = set() + self._ready = deque() + + self._running = False + self._loop = None + + # The eventloop (in celery.worker.loops) + # will merge fds in this set and then instead of calling + # the callback for each ready fd it will call the + # :attr:`consolidate_callback` with the list of ready_fds + # as an argument. This API is internal and is only + # used by the multiprocessing pool to find inqueues + # that are ready to write. + self.consolidate = set() + self.consolidate_callback = None + + self.propagate_errors = () + + self._create_poller() + + def reset(self): + self.close() + self._create_poller() + + def _create_poller(self): + self.poller = poll() + self._register_fd = self.poller.register + self._unregister_fd = self.poller.unregister + + def _close_poller(self): + if self.poller is not None: + self.poller.close() + self.poller = None + self._register_fd = None + self._unregister_fd = None + + def stop(self): + self.call_soon(_raise_stop_error) + + def __repr__(self): + return '<Hub@{0:#x}: R:{1} W:{2}>'.format( + id(self), len(self.readers), len(self.writers), + ) + + def fire_timers(self, min_delay=1, max_delay=10, max_timers=10, + propagate=()): + timer = self.timer + delay = None + if timer and timer._queue: + for i in range(max_timers): + delay, entry = next(self.scheduler) + if entry is None: + break + try: + entry() + except propagate: + raise + except (MemoryError, AssertionError): + raise + except OSError as exc: + if get_errno(exc) == errno.ENOMEM: + raise + logger.error('Error in timer: %r', exc, exc_info=1) + except Exception as exc: + logger.error('Error in timer: %r', exc, exc_info=1) + return min(max(delay or 0, min_delay), max_delay) + + def add(self, fd, callback, flags, args=(), consolidate=False): + fd = fileno(fd) + try: + self.poller.register(fd, flags) + except ValueError: + self._discard(fd) + raise + else: + dest = self.readers if flags & READ else self.writers + if consolidate: + self.consolidate.add(fd) + dest[fd] = None + else: + dest[fd] = callback, args + + def remove(self, fd): + fd = fileno(fd) + self._unregister(fd) + self._discard(fd) + + def run_forever(self): + self._running = True + try: + while 1: + try: + self.run_once() + except Stop: + break + finally: + self._running = False + + def run_once(self): + try: + next(self.loop) + except StopIteration: + self._loop = None + + def call_soon(self, callback, *args): + handle = promise(callback, args) + self._ready.append(handle) + return handle + + def call_later(self, delay, callback, *args): + return self.timer.call_after(delay, callback, args) + + def call_at(self, when, callback, *args): + return self.timer.call_at(when, callback, args) + + def call_repeatedly(self, delay, callback, *args): + return self.timer.call_repeatedly(delay, callback, args) + + def add_reader(self, fds, callback, *args): + return self.add(fds, callback, READ | ERR, args) + + def add_writer(self, fds, callback, *args): + return self.add(fds, callback, WRITE, args) + + def remove_reader(self, fd): + writable = fd in self.writers + on_write = self.writers.get(fd) + try: + self._unregister(fd) + self._discard(fd) + finally: + if writable: + cb, args = on_write + self.add(fd, cb, WRITE, args) + + def remove_writer(self, fd): + readable = fd in self.readers + on_read = self.readers.get(fd) + try: + self._unregister(fd) + self._discard(fd) + finally: + if readable: +
cb, args = on_read + self.add(fd, cb, READ | ERR, args) + + def _unregister(self, fd): + try: + self.poller.unregister(fd) + except (AttributeError, KeyError, OSError): + pass + + def close(self, *args): + [self._unregister(fd) for fd in self.readers] + self.readers.clear() + [self._unregister(fd) for fd in self.writers] + self.writers.clear() + self.consolidate.clear() + self._close_poller() + for callback in self.on_close: + callback(self) + + def _discard(self, fd): + fd = fileno(fd) + self.readers.pop(fd, None) + self.writers.pop(fd, None) + self.consolidate.discard(fd) + + def create_loop(self, + generator=generator, sleep=sleep, min=min, next=next, + Empty=Empty, StopIteration=StopIteration, + KeyError=KeyError, READ=READ, WRITE=WRITE, ERR=ERR): + readers, writers = self.readers, self.writers + poll = self.poller.poll + fire_timers = self.fire_timers + hub_remove = self.remove + scheduled = self.timer._queue + consolidate = self.consolidate + consolidate_callback = self.consolidate_callback + on_tick = self.on_tick + todo = self._ready + propagate = self.propagate_errors + + while 1: + for tick_callback in on_tick: + tick_callback() + + while todo: + item = todo.popleft() + if item: + item() + + poll_timeout = fire_timers(propagate=propagate) if scheduled else 1 + if readers or writers: + to_consolidate = [] + try: + events = poll(poll_timeout) + except ValueError: # Issue 882 + raise StopIteration() + + for fd, event in events or (): + if fd in consolidate and \ + writers.get(fd) is None: + to_consolidate.append(fd) + continue + cb = cbargs = None + + if event & READ: + try: + cb, cbargs = readers[fd] + except KeyError: + self.remove_reader(fd) + continue + elif event & WRITE: + try: + cb, cbargs = writers[fd] + except KeyError: + self.remove_writer(fd) + continue + elif event & ERR: + try: + cb, cbargs = (readers.get(fd) or + writers.get(fd)) + except TypeError: + pass + + if cb is None: + continue + if isinstance(cb, generator): + try: + next(cb) + except OSError as exc: + if get_errno(exc) != errno.EBADF: + raise + hub_remove(fd) + except StopIteration: + pass + except Exception: + hub_remove(fd) + raise + else: + try: + cb(*cbargs) + except Empty: + pass + if to_consolidate: + consolidate_callback(to_consolidate) + else: + # no sockets yet, startup is probably not done. + sleep(min(poll_timeout, 0.1)) + yield + + def repr_active(self): + from .debug import repr_active + return repr_active(self) + + def repr_events(self, events): + from .debug import repr_events + return repr_events(self, events) + + @cached_property + def scheduler(self): + return iter(self.timer) + + @property + def loop(self): + if self._loop is None: + self._loop = self.create_loop() + return self._loop diff --git a/kombu/async/semaphore.py b/kombu/async/semaphore.py new file mode 100644 index 0000000..b446441 --- /dev/null +++ b/kombu/async/semaphore.py @@ -0,0 +1,110 @@ +# -*- coding: utf-8 -*- +""" +kombu.async.semaphore +===================== + +Semaphores and concurrency primitives. + +""" +from __future__ import absolute_import + +from collections import deque + +__all__ = ['DummyLock', 'LaxBoundedSemaphore'] + + +class LaxBoundedSemaphore(object): + """Asynchronous Bounded Semaphore. + + Lax means that the value will stay within the specified + range even if released more times than it was acquired. 
+ + Example: + + >>> def printf(msg): + ... print(msg) + + >>> x = LaxBoundedSemaphore(2) + + >>> x.acquire(printf, 'HELLO 1') + HELLO 1 + + >>> x.acquire(printf, 'HELLO 2') + HELLO 2 + + >>> x.acquire(printf, 'HELLO 3') + >>> x._waiting # private, do not access directly + deque([(printf, ('HELLO 3',))]) + + >>> x.release() + HELLO 3 + + """ + + def __init__(self, value): + self.initial_value = self.value = value + self._waiting = deque() + self._add_waiter = self._waiting.append + self._pop_waiter = self._waiting.popleft + + def acquire(self, callback, *partial_args): + """Acquire semaphore, applying ``callback`` if + the resource is available. + + :param callback: The callback to apply. + :param \*partial_args: partial arguments to callback. + + """ + value = self.value + if value <= 0: + self._add_waiter((callback, partial_args)) + return False + else: + self.value = max(value - 1, 0) + callback(*partial_args) + return True + + def release(self): + """Release semaphore. + + If there are any waiters this will apply the first waiter + that is waiting for the resource (FIFO order). + + """ + try: + waiter, args = self._pop_waiter() + except IndexError: + self.value = min(self.value + 1, self.initial_value) + else: + waiter(*args) + + def grow(self, n=1): + """Change the size of the semaphore to accept more users.""" + self.initial_value += n + self.value += n + [self.release() for _ in range(n)] + + def shrink(self, n=1): + """Change the size of the semaphore to accept fewer users.""" + self.initial_value = max(self.initial_value - n, 0) + self.value = max(self.value - n, 0) + + def clear(self): + """Reset the semaphore, which also wipes out any waiting callbacks.""" + self._waiting.clear() + self.value = self.initial_value + + def __repr__(self): + return '<{0} at {1:#x} value:{2} waiting:{3}>'.format( + self.__class__.__name__, id(self), self.value, len(self._waiting), + ) + + +class DummyLock(object): + """Pretending to be a lock.""" + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + pass diff --git a/kombu/async/timer.py b/kombu/async/timer.py new file mode 100644 index 0000000..00f5412 --- /dev/null +++ b/kombu/async/timer.py @@ -0,0 +1,232 @@ +# -*- coding: utf-8 -*- +""" +kombu.async.timer +================= +
Timer scheduling Python callbacks.
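+
+Illustrative sketch (editor's addition; ``on_tick`` and ``on_timeout`` are
+placeholder callables)::
+
+    timer = Timer()
+    timer.call_after(3, on_timeout)     # run once, ~3 seconds from now
+    timer.call_repeatedly(1, on_tick)   # run roughly every second
+
+    delay, entry = next(iter(timer))    # drive the schedule by hand;
+    # entry is None -> sleep(delay) and poll again, else call entry()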
+ +""" +from __future__ import absolute_import + +import heapq +import sys + +from collections import namedtuple +from datetime import datetime +from functools import wraps +from time import time +from weakref import proxy as weakrefproxy + +from kombu.five import monotonic +from kombu.log import get_logger +from kombu.utils.compat import timedelta_seconds + +try: + from pytz import utc +except ImportError: + utc = None + +DEFAULT_MAX_INTERVAL = 2 +EPOCH = datetime.utcfromtimestamp(0).replace(tzinfo=utc) +IS_PYPY = hasattr(sys, 'pypy_version_info') + +logger = get_logger(__name__) + +__all__ = ['Entry', 'Timer', 'to_timestamp'] + +scheduled = namedtuple('scheduled', ('eta', 'priority', 'entry')) + + +def to_timestamp(d, default_timezone=utc): + if isinstance(d, datetime): + if d.tzinfo is None: + d = d.replace(tzinfo=default_timezone) + return timedelta_seconds(d - EPOCH) + return d + + +class Entry(object): + if not IS_PYPY: # pragma: no cover + __slots__ = ( + 'fun', 'args', 'kwargs', 'tref', 'cancelled', + '_last_run', '__weakref__', + ) + + def __init__(self, fun, args=None, kwargs=None): + self.fun = fun + self.args = args or [] + self.kwargs = kwargs or {} + self.tref = weakrefproxy(self) + self._last_run = None + self.cancelled = False + + def __call__(self): + return self.fun(*self.args, **self.kwargs) + + def cancel(self): + try: + self.tref.cancelled = True + except ReferenceError: # pragma: no cover + pass + + def __repr__(self): + return ' id(other) + + def __le__(self, other): + return id(self) <= id(other) + + def __ge__(self, other): + return id(self) >= id(other) + + def __eq__(self, other): + return hash(self) == hash(other) + + def __ne__(self, other): + return not self.__eq__(other) + + +class Timer(object): + """ETA scheduler.""" + Entry = Entry + + on_error = None + + def __init__(self, max_interval=None, on_error=None, **kwargs): + self.max_interval = float(max_interval or DEFAULT_MAX_INTERVAL) + self.on_error = on_error or self.on_error + self._queue = [] + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.stop() + + def call_at(self, eta, fun, args=(), kwargs={}, priority=0): + return self.enter_at(self.Entry(fun, args, kwargs), eta, priority) + + def call_after(self, secs, fun, args=(), kwargs={}, priority=0): + return self.enter_after(secs, self.Entry(fun, args, kwargs), priority) + + def call_repeatedly(self, secs, fun, args=(), kwargs={}, priority=0): + tref = self.Entry(fun, args, kwargs) + + @wraps(fun) + def _reschedules(*args, **kwargs): + last, now = tref._last_run, monotonic() + lsince = (now - tref._last_run) if last else secs + try: + if lsince and lsince >= secs: + tref._last_run = now + return fun(*args, **kwargs) + finally: + if not tref.cancelled: + last = tref._last_run + next = secs - (now - last) if last else secs + self.enter_after(next, tref, priority) + + tref.fun = _reschedules + tref._last_run = None + return self.enter_after(secs, tref, priority) + + def enter_at(self, entry, eta=None, priority=0, time=time): + """Enter function into the scheduler. + + :param entry: Item to enter. + :keyword eta: Scheduled time as a :class:`datetime.datetime` object. + :keyword priority: Unused. 
+ + """ + if eta is None: + eta = time() + if isinstance(eta, datetime): + try: + eta = to_timestamp(eta) + except Exception as exc: + if not self.handle_error(exc): + raise + return + return self._enter(eta, priority, entry) + + def enter_after(self, secs, entry, priority=0, time=time): + return self.enter_at(entry, time() + secs, priority) + + def _enter(self, eta, priority, entry, push=heapq.heappush): + push(self._queue, scheduled(eta, priority, entry)) + return entry + + def apply_entry(self, entry): + try: + entry() + except Exception as exc: + if not self.handle_error(exc): + logger.error('Error in timer: %r', exc, exc_info=True) + + def handle_error(self, exc_info): + if self.on_error: + self.on_error(exc_info) + return True + + def stop(self): + pass + + def __iter__(self, min=min, nowfun=time, + pop=heapq.heappop, push=heapq.heappush): + """This iterator yields a tuple of ``(entry, wait_seconds)``, + where if entry is :const:`None` the caller should wait + for ``wait_seconds`` until it polls the schedule again.""" + max_interval = self.max_interval + queue = self._queue + + while 1: + if queue: + eventA = queue[0] + now, eta = nowfun(), eventA[0] + + if now < eta: + yield min(eta - now, max_interval), None + else: + eventB = pop(queue) + + if eventB is eventA: + entry = eventA[2] + if not entry.cancelled: + yield None, entry + continue + else: + push(queue, eventB) + else: + yield None, None + + def clear(self): + self._queue[:] = [] # atomic, without creating a new list. + + def cancel(self, tref): + tref.cancel() + + def __len__(self): + return len(self._queue) + + def __nonzero__(self): + return True + + @property + def queue(self, _pop=heapq.heappop): + """Snapshot of underlying datastructure.""" + events = list(self._queue) + return [_pop(v) for v in [events] * len(events)] + + @property + def schedule(self): + return self diff --git a/kombu/clocks.py b/kombu/clocks.py new file mode 100644 index 0000000..a986238 --- /dev/null +++ b/kombu/clocks.py @@ -0,0 +1,148 @@ +""" +kombu.clocks +============ + +Logical Clocks and Synchronization. + +""" +from __future__ import absolute_import + +from threading import Lock +from itertools import islice +from operator import itemgetter + +from .five import zip + +__all__ = ['LamportClock', 'timetuple'] + +R_CLOCK = '_lamport(clock={0}, timestamp={1}, id={2} {3!r})' + + +class timetuple(tuple): + """Tuple of event clock information. + + Can be used as part of a heap to keep events ordered. + + :param clock: Event clock value. + :param timestamp: Event UNIX timestamp value. + :param id: Event host id (e.g. ``hostname:pid``). + :param obj: Optional obj to associate with this event. + + """ + __slots__ = () + + def __new__(cls, clock, timestamp, id, obj=None): + return tuple.__new__(cls, (clock, timestamp, id, obj)) + + def __repr__(self): + return R_CLOCK.format(*self) + + def __getnewargs__(self): + return tuple(self) + + def __lt__(self, other): + # 0: clock 1: timestamp 3: process id + try: + A, B = self[0], other[0] + # uses logical clock value first + if A and B: # use logical clock if available + if A == B: # equal clocks use lower process id + return self[2] < other[2] + return A < B + return self[1] < other[1] # ... 
or use timestamp + except IndexError: + return NotImplemented + __gt__ = lambda self, other: other < self + __le__ = lambda self, other: not other < self + __ge__ = lambda self, other: not self < other + + clock = property(itemgetter(0)) + timestamp = property(itemgetter(1)) + id = property(itemgetter(2)) + obj = property(itemgetter(3)) + + +class LamportClock(object): + """Lamport's logical clock. + + From Wikipedia: + + A Lamport logical clock is a monotonically incrementing software counter + maintained in each process. It follows some simple rules: + + * A process increments its counter before each event in that process; + * When a process sends a message, it includes its counter value with + the message; + * On receiving a message, the receiver process sets its counter to be + greater than the maximum of its own value and the received value + before it considers the message received. + + Conceptually, this logical clock can be thought of as a clock that only + has meaning in relation to messages moving between processes. When a + process receives a message, it resynchronizes its logical clock with + the sender. + + .. seealso:: + + * `Lamport timestamps`_ + + * `Lamport's distributed mutex`_ + + .. _`Lamport timestamps`: http://en.wikipedia.org/wiki/Lamport_timestamps + .. _`Lamport's distributed mutex`: http://bit.ly/p99ybE + + *Usage* + + When sending a message use :meth:`forward` to increment the clock, + when receiving a message use :meth:`adjust` to sync with + the time stamp of the incoming message. + + """ + #: The clock's current value. + value = 0 + + def __init__(self, initial_value=0, Lock=Lock): + self.value = initial_value + self.mutex = Lock() + + def adjust(self, other): + with self.mutex: + value = self.value = max(self.value, other) + 1 + return value + + def forward(self): + with self.mutex: + self.value += 1 + return self.value + + def sort_heap(self, h): + """List of tuples containing at least two elements, representing + an event, where the first element is the event's scalar clock value, + and the second element is the id of the process (usually + ``"hostname:pid"``): ``sh([(clock, processid, ...?), (...)])`` + + The list must already be sorted, which is why we refer to it as a + heap. + + The tuple will not be unpacked, so more than two elements can be + present. + + Will return the latest event. + + """ + if h[0][0] == h[1][0]: + same = [] + for PN in zip(h, islice(h, 1, None)): + if PN[0][0] != PN[1][0]: + break # Prev and Next's clocks differ + same.append(PN[0]) + # return first item sorted by process id + return sorted(same, key=lambda event: event[1])[0] + # clock values unique, return first item + return h[0] + + def __str__(self): + return str(self.value) + + def __repr__(self): + return '<LamportClock: {0.value}>'.format(self) diff --git a/kombu/common.py b/kombu/common.py new file mode 100644 index 0000000..99f4f6f --- /dev/null +++ b/kombu/common.py @@ -0,0 +1,398 @@ +""" +kombu.common +============ +
Common Utilities.
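+
+Illustrative sketch of the declaration helper defined below (editor's
+addition; ``amqp://`` assumes a local broker)::
+
+    from kombu import Connection, Queue
+    from kombu.common import maybe_declare
+
+    with Connection('amqp://') as conn:
+        # declares queue and exchange, caching them in
+        # conn.declared_entities so later calls can skip the round-trip
+        maybe_declare(Queue('tasks'), conn.default_channel)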
+ +""" +from __future__ import absolute_import + +import os +import socket +import threading + +from collections import deque +from contextlib import contextmanager +from functools import partial +from itertools import count +from uuid import getnode as _getnode, uuid4, uuid3, NAMESPACE_OID + +from amqp import RecoverableConnectionError + +from .entity import Exchange, Queue +from .five import range +from .log import get_logger +from .serialization import registry as serializers +from .utils import uuid + +try: + from _thread import get_ident +except ImportError: # pragma: no cover + try: # noqa + from thread import get_ident # noqa + except ImportError: # pragma: no cover + from dummy_thread import get_ident # noqa + +__all__ = ['Broadcast', 'maybe_declare', 'uuid', + 'itermessages', 'send_reply', + 'collect_replies', 'insured', 'drain_consumer', + 'eventloop'] + +#: Prefetch count can't exceed short. +PREFETCH_COUNT_MAX = 0xFFFF + +logger = get_logger(__name__) + +_node_id = None + + +def get_node_id(): + global _node_id + if _node_id is None: + _node_id = uuid4().int + return _node_id + + +def generate_oid(node_id, process_id, thread_id, instance): + ent = '%x-%x-%x-%x' % (get_node_id(), process_id, thread_id, id(instance)) + return str(uuid3(NAMESPACE_OID, ent)) + + +def oid_from(instance): + return generate_oid(_getnode(), os.getpid(), get_ident(), instance) + + +class Broadcast(Queue): + """Convenience class used to define broadcast queues. + + Every queue instance will have a unique name, + and both the queue and exchange is configured with auto deletion. + + :keyword name: This is used as the name of the exchange. + :keyword queue: By default a unique id is used for the queue + name for every consumer. You can specify a custom queue + name here. + :keyword \*\*kwargs: See :class:`~kombu.Queue` for a list + of additional keyword arguments supported. 
+ + """ + + def __init__(self, name=None, queue=None, **kwargs): + return super(Broadcast, self).__init__( + name=queue or 'bcast.%s' % (uuid(), ), + **dict({'alias': name, + 'auto_delete': True, + 'exchange': Exchange(name, type='fanout')}, **kwargs)) + + +def declaration_cached(entity, channel): + return entity in channel.connection.client.declared_entities + + +def maybe_declare(entity, channel=None, retry=False, **retry_policy): + is_bound = entity.is_bound + + if not is_bound: + assert channel + entity = entity.bind(channel) + + if channel is None: + assert is_bound + channel = entity.channel + + declared = ident = None + if channel.connection and entity.can_cache_declaration: + declared = channel.connection.client.declared_entities + ident = hash(entity) + if ident in declared: + return False + + if retry: + return _imaybe_declare(entity, declared, ident, + channel, **retry_policy) + return _maybe_declare(entity, declared, ident, channel) + + +def _maybe_declare(entity, declared, ident, channel): + channel = channel or entity.channel + if not channel.connection: + raise RecoverableConnectionError('channel disconnected') + entity.declare() + if declared is not None and ident: + declared.add(ident) + return True + + +def _imaybe_declare(entity, declared, ident, channel, **retry_policy): + return entity.channel.connection.client.ensure( + entity, _maybe_declare, **retry_policy)( + entity, declared, ident, channel) + + +def drain_consumer(consumer, limit=1, timeout=None, callbacks=None): + acc = deque() + + def on_message(body, message): + acc.append((body, message)) + + consumer.callbacks = [on_message] + (callbacks or []) + + with consumer: + for _ in eventloop(consumer.channel.connection.client, + limit=limit, timeout=timeout, ignore_timeouts=True): + try: + yield acc.popleft() + except IndexError: + pass + + +def itermessages(conn, channel, queue, limit=1, timeout=None, + callbacks=None, **kwargs): + return drain_consumer(conn.Consumer(channel, queues=[queue], **kwargs), + limit=limit, timeout=timeout, callbacks=callbacks) + + +def eventloop(conn, limit=None, timeout=None, ignore_timeouts=False): + """Best practice generator wrapper around ``Connection.drain_events``. + + Able to drain events forever, with a limit, and optionally ignoring + timeout errors (a timeout of 1 is often used in environments where + the socket can get "stuck", and is a best practice for Kombu consumers). + + **Examples** + + ``eventloop`` is a generator:: + + from kombu.common import eventloop + + def run(connection): + it = eventloop(connection, timeout=1, ignore_timeouts=True) + next(it) # one event consumed, or timed out. + + for _ in eventloop(connection, timeout=1, ignore_timeouts=True): + pass # loop forever. + + It also takes an optional limit parameter, and timeout errors + are propagated by default:: + + for _ in eventloop(connection, limit=1, timeout=1): + pass + + .. seealso:: + + :func:`itermessages`, which is an event loop bound to one or more + consumers, that yields any messages received. + + """ + for i in limit and range(limit) or count(): + try: + yield conn.drain_events(timeout=timeout) + except socket.timeout: + if timeout and not ignore_timeouts: # pragma: no cover + raise + + +def send_reply(exchange, req, msg, + producer=None, retry=False, retry_policy=None, **props): + """Send reply for request. + + :param exchange: Reply exchange + :param req: Original request, a message with a ``reply_to`` property. 
:param msg: The reply message to send. + :param producer: Producer instance + :param retry: If true, retry according to the ``retry_policy`` argument. + :param retry_policy: Retry settings. + :param props: Extra properties + + """ + + producer.publish( + msg, exchange=exchange, + retry=retry, retry_policy=retry_policy, + **dict({'routing_key': req.properties['reply_to'], + 'correlation_id': req.properties.get('correlation_id'), + 'serializer': serializers.type_to_name[req.content_type], + 'content_encoding': req.content_encoding}, **props) + ) + + +def collect_replies(conn, channel, queue, *args, **kwargs): + """Generator collecting replies from ``queue``""" + no_ack = kwargs.setdefault('no_ack', True) + received = False + try: + for body, message in itermessages(conn, channel, queue, + *args, **kwargs): + if not no_ack: + message.ack() + received = True + yield body + finally: + if received: + channel.after_reply_message_received(queue.name) + + +def _ensure_errback(exc, interval): + logger.error( + 'Connection error: %r. Retry in %ss\n', exc, interval, + exc_info=True, + ) + + +@contextmanager +def _ignore_errors(conn): + try: + yield + except conn.connection_errors + conn.channel_errors: + pass + + +def ignore_errors(conn, fun=None, *args, **kwargs): + """Ignore connection and channel errors. + + The first argument must be a connection object, or any other object + with ``connection_errors`` and ``channel_errors`` attributes. + + Can be used as a function: + + .. code-block:: python + + def example(connection): + ignore_errors(connection, consumer.channel.close) + + or as a context manager: + + .. code-block:: python + + def example(connection): + with ignore_errors(connection): + consumer.channel.close() + + + .. note:: + + Connection and channel errors should be properly handled, + and not ignored. Using this function is only acceptable in a cleanup + phase, like when a connection is lost or at shutdown. + + """ + if fun: + with _ignore_errors(conn): + return fun(*args, **kwargs) + return _ignore_errors(conn) + + +def revive_connection(connection, channel, on_revive=None): + if on_revive: + on_revive(channel) + + +def insured(pool, fun, args, kwargs, errback=None, on_revive=None, **opts): + """Ensures function performing broker commands completes + despite intermittent connection failures.""" + errback = errback or _ensure_errback + + with pool.acquire(block=True) as conn: + conn.ensure_connection(errback=errback) + # we cache the channel for subsequent calls, this has to be + # reset on revival. + channel = conn.default_channel + revive = partial(revive_connection, conn, on_revive=on_revive) + insured = conn.autoretry(fun, channel, errback=errback, + on_revive=revive, **opts) + retval, _ = insured(*args, **dict(kwargs, connection=conn)) + return retval + + +class QoS(object): + """Thread-safe increment/decrement of a channel's prefetch_count. + + :param callback: Function used to set new prefetch count, + e.g. ``consumer.qos`` or ``channel.basic_qos``. Will be called + with a single ``prefetch_count`` keyword argument. + :param initial_value: Initial prefetch count value. + + **Example usage** + + .. code-block:: python + + >>> from kombu import Consumer, Connection + >>> connection = Connection('amqp://') + >>> consumer = Consumer(connection) + >>> qos = QoS(consumer.qos, initial_value=2) + >>> qos.update()  # set initial + + >>> qos.value + 2 + + >>> def in_some_thread(): + ... qos.increment_eventually() + + >>> def in_some_other_thread(): + ... qos.decrement_eventually() + + >>> while 1: + ...
if qos.prev != qos.value: + ... qos.update() # prefetch changed so update. + + It can be used with any function supporting a ``prefetch_count`` keyword + argument:: + + >>> channel = connection.channel() + >>> QoS(channel.basic_qos, 10) + + + >>> def set_qos(prefetch_count): + ... print('prefetch count now: %r' % (prefetch_count, )) + >>> QoS(set_qos, 10) + + """ + prev = None + + def __init__(self, callback, initial_value): + self.callback = callback + self._mutex = threading.RLock() + self.value = initial_value or 0 + + def increment_eventually(self, n=1): + """Increment the value, but do not update the channels QoS. + + The MainThread will be responsible for calling :meth:`update` + when necessary. + + """ + with self._mutex: + if self.value: + self.value = self.value + max(n, 0) + return self.value + + def decrement_eventually(self, n=1): + """Decrement the value, but do not update the channels QoS. + + The MainThread will be responsible for calling :meth:`update` + when necessary. + + """ + with self._mutex: + if self.value: + self.value -= n + if self.value < 1: + self.value = 1 + return self.value + + def set(self, pcount): + """Set channel prefetch_count setting.""" + if pcount != self.prev: + new_value = pcount + if pcount > PREFETCH_COUNT_MAX: + logger.warn('QoS: Disabled: prefetch_count exceeds %r', + PREFETCH_COUNT_MAX) + new_value = 0 + logger.debug('basic.qos: prefetch_count->%s', new_value) + self.callback(prefetch_count=new_value) + self.prev = pcount + return pcount + + def update(self): + """Update prefetch count with current value.""" + with self._mutex: + return self.set(self.value) diff --git a/kombu/compat.py b/kombu/compat.py new file mode 100644 index 0000000..7347e9b --- /dev/null +++ b/kombu/compat.py @@ -0,0 +1,215 @@ +""" +kombu.compat +============ + +Carrot compatible interface for :class:`Publisher` and :class:`Producer`. + +See http://packages.python.org/pypi/carrot for documentation. + +""" +from __future__ import absolute_import + +from itertools import count + +from . 
import messaging +from .entity import Exchange, Queue +from .five import items + +__all__ = ['Publisher', 'Consumer'] + +# XXX compat attribute +entry_to_queue = Queue.from_dict + + +def _iterconsume(connection, consumer, no_ack=False, limit=None): + consumer.consume(no_ack=no_ack) + for iteration in count(0): # for infinity + if limit and iteration >= limit: + raise StopIteration + yield connection.drain_events() + + +class Publisher(messaging.Producer): + exchange = '' + exchange_type = 'direct' + routing_key = '' + durable = True + auto_delete = False + _closed = False + + def __init__(self, connection, exchange=None, routing_key=None, + exchange_type=None, durable=None, auto_delete=None, + channel=None, **kwargs): + if channel: + connection = channel + + self.exchange = exchange or self.exchange + self.exchange_type = exchange_type or self.exchange_type + self.routing_key = routing_key or self.routing_key + + if auto_delete is not None: + self.auto_delete = auto_delete + if durable is not None: + self.durable = durable + + if not isinstance(self.exchange, Exchange): + self.exchange = Exchange(name=self.exchange, + type=self.exchange_type, + routing_key=self.routing_key, + auto_delete=self.auto_delete, + durable=self.durable) + super(Publisher, self).__init__(connection, self.exchange, **kwargs) + + def send(self, *args, **kwargs): + return self.publish(*args, **kwargs) + + def close(self): + super(Publisher, self).close() + self._closed = True + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + + @property + def backend(self): + return self.channel + + +class Consumer(messaging.Consumer): + queue = '' + exchange = '' + routing_key = '' + exchange_type = 'direct' + durable = True + exclusive = False + auto_delete = False + exchange_type = 'direct' + _closed = False + + def __init__(self, connection, queue=None, exchange=None, + routing_key=None, exchange_type=None, durable=None, + exclusive=None, auto_delete=None, **kwargs): + self.backend = connection.channel() + + if durable is not None: + self.durable = durable + if exclusive is not None: + self.exclusive = exclusive + if auto_delete is not None: + self.auto_delete = auto_delete + + self.queue = queue or self.queue + self.exchange = exchange or self.exchange + self.exchange_type = exchange_type or self.exchange_type + self.routing_key = routing_key or self.routing_key + + exchange = Exchange(self.exchange, + type=self.exchange_type, + routing_key=self.routing_key, + auto_delete=self.auto_delete, + durable=self.durable) + queue = Queue(self.queue, + exchange=exchange, + routing_key=self.routing_key, + durable=self.durable, + exclusive=self.exclusive, + auto_delete=self.auto_delete) + super(Consumer, self).__init__(self.backend, queue, **kwargs) + + def revive(self, channel): + self.backend = channel + super(Consumer, self).revive(channel) + + def close(self): + self.cancel() + self.backend.close() + self._closed = True + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + + def __iter__(self): + return self.iterqueue(infinite=True) + + def fetch(self, no_ack=None, enable_callbacks=False): + if no_ack is None: + no_ack = self.no_ack + message = self.queues[0].get(no_ack) + if message: + if enable_callbacks: + self.receive(message.payload, message) + return message + + def process_next(self): + raise NotImplementedError('Use fetch(enable_callbacks=True)') + + def discard_all(self, filterfunc=None): + if filterfunc is not None: + raise NotImplementedError( + 
'discard_all does not implement filters') + return self.purge() + + def iterconsume(self, limit=None, no_ack=None): + return _iterconsume(self.connection, self, no_ack, limit) + + def wait(self, limit=None): + it = self.iterconsume(limit) + return list(it) + + def iterqueue(self, limit=None, infinite=False): + for items_since_start in count(): # for infinity + item = self.fetch() + if (not infinite and item is None) or \ + (limit and items_since_start >= limit): + raise StopIteration + yield item + + +class ConsumerSet(messaging.Consumer): + + def __init__(self, connection, from_dict=None, consumers=None, + channel=None, **kwargs): + if channel: + self._provided_channel = True + self.backend = channel + else: + self._provided_channel = False + self.backend = connection.channel() + + queues = [] + if consumers: + for consumer in consumers: + queues.extend(consumer.queues) + if from_dict: + for queue_name, queue_options in items(from_dict): + queues.append(Queue.from_dict(queue_name, **queue_options)) + + super(ConsumerSet, self).__init__(self.backend, queues, **kwargs) + + def iterconsume(self, limit=None, no_ack=False): + return _iterconsume(self.connection, self, no_ack, limit) + + def discard_all(self): + return self.purge() + + def add_consumer_from_dict(self, queue, **options): + return self.add_queue_from_dict(queue, **options) + + def add_consumer(self, consumer): + for queue in consumer.queues: + self.add_queue(queue) + + def revive(self, channel): + self.backend = channel + super(ConsumerSet, self).revive(channel) + + def close(self): + self.cancel() + if not self._provided_channel: + self.channel.close() diff --git a/kombu/compression.py b/kombu/compression.py new file mode 100644 index 0000000..866433d --- /dev/null +++ b/kombu/compression.py @@ -0,0 +1,83 @@ +""" +kombu.compression +================= + +Compression utilities. + +""" +from __future__ import absolute_import + +from kombu.utils.encoding import ensure_bytes + +import zlib + +_aliases = {} +_encoders = {} +_decoders = {} + +__all__ = ['register', 'encoders', 'get_encoder', + 'get_decoder', 'compress', 'decompress'] + + +def register(encoder, decoder, content_type, aliases=[]): + """Register new compression method. + + :param encoder: Function used to compress text. + :param decoder: Function used to decompress previously compressed text. + :param content_type: The mime type this compression method identifies as. + :param aliases: A list of names to associate with this compression method. + + """ + _encoders[content_type] = encoder + _decoders[content_type] = decoder + _aliases.update((alias, content_type) for alias in aliases) + + +def encoders(): + """Return a list of available compression methods.""" + return list(_encoders) + + +def get_encoder(t): + """Get encoder by alias name.""" + t = _aliases.get(t, t) + return _encoders[t], t + + +def get_decoder(t): + """Get decoder by alias name.""" + return _decoders[_aliases.get(t, t)] + + +def compress(body, content_type): + """Compress text. + + :param body: The text to compress. + :param content_type: mime-type of compression method to use. + + """ + encoder, content_type = get_encoder(content_type) + return encoder(ensure_bytes(body)), content_type + + +def decompress(body, content_type): + """Decompress compressed text. + + :param body: Previously compressed text to uncompress. + :param content_type: mime-type of compression method used. 
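+
+    Example round-trip (editor's sketch)::
+
+        >>> payload, ctype = compress(b'x' * 1024, 'zlib')
+        >>> ctype
+        'application/x-gzip'
+        >>> decompress(payload, ctype) == b'x' * 1024
+        True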
+ + """ + return get_decoder(content_type)(body) + + +register(zlib.compress, + zlib.decompress, + 'application/x-gzip', aliases=['gzip', 'zlib']) +try: + import bz2 +except ImportError: + pass # Jython? +else: + register(bz2.compress, + bz2.decompress, + 'application/x-bz2', aliases=['bzip2', 'bzip']) diff --git a/kombu/connection.py b/kombu/connection.py new file mode 100644 index 0000000..291d680 --- /dev/null +++ b/kombu/connection.py @@ -0,0 +1,1059 @@ +""" +kombu.connection +================ + +Broker connection and pools. + +""" +from __future__ import absolute_import + +import os +import socket + +from contextlib import contextmanager +from itertools import count, cycle +from operator import itemgetter + +# jython breaks on relative import for .exceptions for some reason +# (Issue #112) +from kombu import exceptions +from .five import Empty, range, string_t, text_t, LifoQueue as _LifoQueue +from .log import get_logger +from .transport import get_transport_cls, supports_librabbitmq +from .utils import cached_property, retry_over_time, shufflecycle, HashedSeq +from .utils.compat import OrderedDict +from .utils.functional import lazy +from .utils.url import as_url, parse_url, quote, urlparse + +__all__ = ['Connection', 'ConnectionPool', 'ChannelPool'] + +RESOLVE_ALIASES = {'pyamqp': 'amqp', + 'librabbitmq': 'amqp'} + +_LOG_CONNECTION = os.environ.get('KOMBU_LOG_CONNECTION', False) +_LOG_CHANNEL = os.environ.get('KOMBU_LOG_CHANNEL', False) + +logger = get_logger(__name__) +roundrobin_failover = cycle + +failover_strategies = { + 'round-robin': roundrobin_failover, + 'shuffle': shufflecycle, +} + + +class Connection(object): + """A connection to the broker. + + :param URL: Broker URL, or a list of URLs, e.g. + + .. code-block:: python + + Connection('amqp://guest:guest@localhost:5672//') + Connection('amqp://foo;amqp://bar', failover_strategy='round-robin') + Connection('redis://', transport_options={ + 'visibility_timeout': 3000, + }) + + import ssl + Connection('amqp://', login_method='EXTERNAL', ssl={ + 'ca_certs': '/etc/pki/tls/certs/something.crt', + 'keyfile': '/etc/something/system.key', + 'certfile': '/etc/something/system.cert', + 'cert_reqs': ssl.CERT_REQUIRED, + }) + + .. admonition:: SSL compatibility + + SSL currently only works with the py-amqp & amqplib transports. + For other transports you can use stunnel. + + :keyword hostname: Default host name/address if not provided in the URL. + :keyword userid: Default user name if not provided in the URL. + :keyword password: Default password if not provided in the URL. + :keyword virtual_host: Default virtual host if not provided in the URL. + :keyword port: Default port if not provided in the URL. + :keyword ssl: Use SSL to connect to the server. Default is ``False``. + May not be supported by the specified transport. + :keyword transport: Default transport if not specified in the URL. + :keyword connect_timeout: Timeout in seconds for connecting to the + server. May not be supported by the specified transport. + :keyword transport_options: A dict of additional connection arguments to + pass to alternate kombu channel implementations. Consult the transport + documentation for available options. + :keyword heartbeat: Heartbeat interval in int/float seconds. + Note that if heartbeats are enabled then the :meth:`heartbeat_check` + method must be called regularly, around once per second. + + .. note:: + + The connection is established lazily when needed. 
If you need the + connection to be established, then force it by calling + :meth:`connect`:: + + >>> conn = Connection('amqp://') + >>> conn.connect() + + and always remember to close the connection:: + + >>> conn.release() + + """ + port = None + virtual_host = '/' + connect_timeout = 5 + + _closed = None + _connection = None + _default_channel = None + _transport = None + _logger = False + uri_prefix = None + + #: The cache of declared entities is per connection, + #: in case the server loses data. + declared_entities = None + + #: Iterator returning the next broker URL to try in the event + #: of connection failure (initialized by :attr:`failover_strategy`). + cycle = None + + #: Additional transport specific options, + #: passed on to the transport instance. + transport_options = None + + #: Strategy used to select new hosts when reconnecting after connection + #: failure. One of "round-robin", "shuffle" or any custom iterator + #: constantly yielding new URLs to try. + failover_strategy = 'round-robin' + + #: Heartbeat value, currently only supported by the py-amqp transport. + heartbeat = None + + hostname = userid = password = ssl = login_method = None + + def __init__(self, hostname='localhost', userid=None, + password=None, virtual_host=None, port=None, insist=False, + ssl=False, transport=None, connect_timeout=5, + transport_options=None, login_method=None, uri_prefix=None, + heartbeat=0, failover_strategy='round-robin', + alternates=None, **kwargs): + alt = [] if alternates is None else alternates + # have to spell the args out, just to get nice docstrings :( + params = self._initial_params = { + 'hostname': hostname, 'userid': userid, + 'password': password, 'virtual_host': virtual_host, + 'port': port, 'insist': insist, 'ssl': ssl, + 'transport': transport, 'connect_timeout': connect_timeout, + 'login_method': login_method, 'heartbeat': heartbeat + } + + if hostname and not isinstance(hostname, string_t): + alt.extend(hostname) + hostname = alt[0] + if hostname and '://' in hostname: + if ';' in hostname: + alt.extend(hostname.split(';')) + hostname = alt[0] + if '+' in hostname[:hostname.index('://')]: + # e.g. sqla+mysql://root:masterkey@localhost/ + params['transport'], params['hostname'] = \ + hostname.split('+', 1) + transport = self.uri_prefix = params['transport'] + else: + transport = transport or urlparse(hostname).scheme + if get_transport_cls(transport).can_parse_url: + # set the transport so that the default is not used. 
+ params['transport'] = transport + else: + # we must parse the URL + params.update(parse_url(hostname)) + self._init_params(**params) + + # fallback hosts + self.alt = alt + self.failover_strategy = failover_strategies.get( + failover_strategy or 'round-robin') or failover_strategy + if self.alt: + self.cycle = self.failover_strategy(self.alt) + next(self.cycle) # skip first entry + + if transport_options is None: + transport_options = {} + self.transport_options = transport_options + + if _LOG_CONNECTION: # pragma: no cover + self._logger = True + + if uri_prefix: + self.uri_prefix = uri_prefix + + self.declared_entities = set() + + def switch(self, url): + """Switch connection parameters to use a new URL (does not + reconnect)""" + self.close() + self.declared_entities.clear() + self._closed = False + self._init_params(**dict(self._initial_params, **parse_url(url))) + + def maybe_switch_next(self): + """Switch to next URL given by the current failover strategy (if + any).""" + if self.cycle: + self.switch(next(self.cycle)) + + def _init_params(self, hostname, userid, password, virtual_host, port, + insist, ssl, transport, connect_timeout, + login_method, heartbeat): + transport = transport or 'amqp' + if transport == 'amqp' and supports_librabbitmq(): + transport = 'librabbitmq' + self.hostname = hostname + self.userid = userid + self.password = password + self.login_method = login_method + self.virtual_host = virtual_host or self.virtual_host + self.port = port or self.port + self.insist = insist + self.connect_timeout = connect_timeout + self.ssl = ssl + self.transport_cls = transport + self.heartbeat = heartbeat and float(heartbeat) + + def register_with_event_loop(self, loop): + self.transport.register_with_event_loop(self.connection, loop) + + def _debug(self, msg, *args, **kwargs): + if self._logger: # pragma: no cover + fmt = '[Kombu connection:0x{id:x}] {msg}' + logger.debug(fmt.format(id=id(self), msg=text_t(msg)), + *args, **kwargs) + + def connect(self): + """Establish connection to server immediately.""" + self._closed = False + return self.connection + + def channel(self): + """Create and return a new channel.""" + self._debug('create channel') + chan = self.transport.create_channel(self.connection) + if _LOG_CHANNEL: # pragma: no cover + from .utils.debug import Logwrapped + return Logwrapped(chan, 'kombu.channel', + '[Kombu channel:{0.channel_id}] ') + return chan + + def heartbeat_check(self, rate=2): + """Allow the transport to perform any periodic tasks + required to make heartbeats work. This should be called + approximately every second. + + If the current transport does not support heartbeats then + this is a noop operation. + + :keyword rate: Rate is how often the tick is called + compared to the actual heartbeat value. E.g. if + the heartbeat is set to 3 seconds, and the tick + is called every 3 / 2 seconds, then the rate is 2. + This value is currently unused by any transports. + + """ + return self.transport.heartbeat_check(self.connection, rate=rate) + + def drain_events(self, **kwargs): + """Wait for a single event from the server. + + :keyword timeout: Timeout in seconds before we give up. + + + :raises :exc:`socket.timeout`: if the timeout is exceeded. 
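+
+        Example, as a minimal sketch (assumes a consumer with a message
+        callback has already been started on this connection)::
+
+            import socket
+
+            while True:
+                try:
+                    connection.drain_events(timeout=1)
+                except socket.timeout:
+                    pass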
+ + """ + return self.transport.drain_events(self.connection, **kwargs) + + def maybe_close_channel(self, channel): + """Close given channel, but ignore connection and channel errors.""" + try: + channel.close() + except (self.connection_errors + self.channel_errors): + pass + + def _do_close_self(self): + # Close only connection and channel(s), but not transport. + self.declared_entities.clear() + if self._default_channel: + self.maybe_close_channel(self._default_channel) + if self._connection: + try: + self.transport.close_connection(self._connection) + except self.connection_errors + (AttributeError, socket.error): + pass + self._connection = None + + def _close(self): + """Really close connection, even if part of a connection pool.""" + self._do_close_self() + if self._transport: + self._transport.client = None + self._transport = None + self._debug('closed') + self._closed = True + + def collect(self, socket_timeout=None): + # amqp requires communication to close, we don't need that just + # to clear out references, Transport._collect can also be implemented + # by other transports that want fast after fork + try: + gc_transport = self._transport._collect + except AttributeError: + _timeo = socket.getdefaulttimeout() + socket.setdefaulttimeout(socket_timeout) + try: + self._close() + except socket.timeout: + pass + finally: + socket.setdefaulttimeout(_timeo) + else: + gc_transport(self._connection) + if self._transport: + self._transport.client = None + self._transport = None + self.declared_entities.clear() + self._connection = None + + def release(self): + """Close the connection (if open).""" + self._close() + close = release + + def ensure_connection(self, errback=None, max_retries=None, + interval_start=2, interval_step=2, interval_max=30, + callback=None): + """Ensure we have a connection to the server. + + If not retry establishing the connection with the settings + specified. + + :keyword errback: Optional callback called each time the connection + can't be established. Arguments provided are the exception + raised and the interval that will be slept ``(exc, interval)``. + + :keyword max_retries: Maximum number of times to retry. + If this limit is exceeded the connection error will be re-raised. + + :keyword interval_start: The number of seconds we start sleeping for. + :keyword interval_step: How many seconds added to the interval + for each retry. + :keyword interval_max: Maximum number of seconds to sleep between + each retry. + :keyword callback: Optional callback that is called for every + internal iteration (1 s) + + """ + def on_error(exc, intervals, retries, interval=0): + round = self.completes_cycle(retries) + if round: + interval = next(intervals) + if errback: + errback(exc, interval) + self.maybe_switch_next() # select next host + + return interval if round else 0 + + retry_over_time(self.connect, self.recoverable_connection_errors, + (), {}, on_error, max_retries, + interval_start, interval_step, interval_max, callback) + return self + + def completes_cycle(self, retries): + """Return true if the cycle is complete after number of `retries`.""" + return not (retries + 1) % len(self.alt) if self.alt else True + + def revive(self, new_channel): + """Revive connection after connection re-established.""" + if self._default_channel: + self.maybe_close_channel(self._default_channel) + self._default_channel = None + + def _default_ensure_callback(self, exc, interval): + logger.error("Ensure: Operation error: %r. 
Retry in %ss", + exc, interval, exc_info=True) + + def ensure(self, obj, fun, errback=None, max_retries=None, + interval_start=1, interval_step=1, interval_max=1, + on_revive=None): + """Ensure operation completes, regardless of any channel/connection + errors occurring. + + Will retry by establishing the connection, and reapplying + the function. + + :param fun: Method to apply. + + :keyword errback: Optional callback called each time the connection + can't be established. Arguments provided are the exception + raised and the interval that will be slept ``(exc, interval)``. + + :keyword max_retries: Maximum number of times to retry. + If this limit is exceeded the connection error will be re-raised. + + :keyword interval_start: The number of seconds we start sleeping for. + :keyword interval_step: How many seconds added to the interval + for each retry. + :keyword interval_max: Maximum number of seconds to sleep between + each retry. + + **Example** + + This is an example ensuring a publish operation:: + + >>> from kombu import Connection, Producer + >>> conn = Connection('amqp://') + >>> producer = Producer(conn) + + >>> def errback(exc, interval): + ... logger.error('Error: %r', exc, exc_info=1) + ... logger.info('Retry in %s seconds.', interval) + + >>> publish = conn.ensure(producer, producer.publish, + ... errback=errback, max_retries=3) + >>> publish({'hello': 'world'}, routing_key='dest') + + """ + def _ensured(*args, **kwargs): + got_connection = 0 + conn_errors = self.recoverable_connection_errors + chan_errors = self.recoverable_channel_errors + has_modern_errors = hasattr( + self.transport, 'recoverable_connection_errors', + ) + for retries in count(0): # for infinity + try: + return fun(*args, **kwargs) + except conn_errors as exc: + if got_connection and not has_modern_errors: + # transport can not distinguish between + # recoverable/irrecoverable errors, so we propagate + # the error if it persists after a new connection was + # successfully established. + raise + if max_retries is not None and retries > max_retries: + raise + self._debug('ensure connection error: %r', exc, exc_info=1) + self._connection = None + self._do_close_self() + errback and errback(exc, 0) + remaining_retries = None + if max_retries is not None: + remaining_retries = max(max_retries - retries, 1) + self.ensure_connection(errback, + remaining_retries, + interval_start, + interval_step, + interval_max) + new_channel = self.channel() + self.revive(new_channel) + obj.revive(new_channel) + if on_revive: + on_revive(new_channel) + got_connection += 1 + except chan_errors as exc: + if max_retries is not None and retries > max_retries: + raise + self._debug('ensure channel error: %r', exc, exc_info=1) + errback and errback(exc, 0) + _ensured.__name__ = "%s(ensured)" % fun.__name__ + _ensured.__doc__ = fun.__doc__ + _ensured.__module__ = fun.__module__ + return _ensured + + def autoretry(self, fun, channel=None, **ensure_options): + """Decorator for functions supporting a ``channel`` keyword argument. + + The resulting callable will retry calling the function if + it raises connection or channel related errors. + The return value will be a tuple of ``(retval, last_created_channel)``. + + If a ``channel`` is not provided, then one will be automatically + acquired (remember to close it afterwards). + + See :meth:`ensure` for the full list of supported keyword arguments. 
+ + Example usage:: + + channel = connection.channel() + try: + ret, channel = connection.autoretry(publish_messages, channel) + finally: + channel.close() + """ + channels = [channel] + create_channel = self.channel + + class Revival(object): + __name__ = fun.__name__ + __module__ = fun.__module__ + __doc__ = fun.__doc__ + + def revive(self, channel): + channels[0] = channel + + def __call__(self, *args, **kwargs): + if channels[0] is None: + self.revive(create_channel()) + return fun(*args, channel=channels[0], **kwargs), channels[0] + + revive = Revival() + return self.ensure(revive, revive, **ensure_options) + + def create_transport(self): + return self.get_transport_cls()(client=self) + + def get_transport_cls(self): + """Get the currently used transport class.""" + transport_cls = self.transport_cls + if not transport_cls or isinstance(transport_cls, string_t): + transport_cls = get_transport_cls(transport_cls) + return transport_cls + + def clone(self, **kwargs): + """Create a copy of the connection with the same connection + settings.""" + return self.__class__(**dict(self._info(resolve=False), **kwargs)) + + def get_heartbeat_interval(self): + return self.transport.get_heartbeat_interval(self.connection) + + def _info(self, resolve=True): + transport_cls = self.transport_cls + if resolve: + transport_cls = RESOLVE_ALIASES.get(transport_cls, transport_cls) + D = self.transport.default_connection_params + + hostname = self.hostname or D.get('hostname') + if self.uri_prefix: + hostname = '%s+%s' % (self.uri_prefix, hostname) + + info = ( + ('hostname', hostname), + ('userid', self.userid or D.get('userid')), + ('password', self.password or D.get('password')), + ('virtual_host', self.virtual_host or D.get('virtual_host')), + ('port', self.port or D.get('port')), + ('insist', self.insist), + ('ssl', self.ssl), + ('transport', transport_cls), + ('connect_timeout', self.connect_timeout), + ('transport_options', self.transport_options), + ('login_method', self.login_method or D.get('login_method')), + ('uri_prefix', self.uri_prefix), + ('heartbeat', self.heartbeat), + ('alternates', self.alt), + ) + return info + + def info(self): + """Get connection info.""" + return OrderedDict(self._info()) + + def __eqhash__(self): + return HashedSeq(self.transport_cls, self.hostname, self.userid, + self.password, self.virtual_host, self.port, + repr(self.transport_options)) + + def as_uri(self, include_password=False, mask='**', + getfields=itemgetter('port', 'userid', 'password', + 'virtual_host', 'transport')): + """Convert connection parameters to URL form.""" + hostname = self.hostname or 'localhost' + if self.transport.can_parse_url: + if self.uri_prefix: + return '%s+%s' % (self.uri_prefix, hostname) + return self.hostname + fields = self.info() + port, userid, password, vhost, transport = getfields(fields) + scheme = ('{0}+{1}'.format(self.uri_prefix, transport) + if self.uri_prefix else transport) + return as_url( + scheme, hostname, port, userid, password, quote(vhost), + sanitize=not include_password, mask=mask, + ) + + def Pool(self, limit=None, preload=None): + """Pool of connections. + + See :class:`ConnectionPool`. + + :keyword limit: Maximum number of active connections. + Default is no limit. + :keyword preload: Number of connections to preload + when the pool is created. Default is 0. 
+
+        *Example usage*::
+
+            >>> connection = Connection('amqp://')
+            >>> pool = connection.Pool(2)
+            >>> c1 = pool.acquire()
+            >>> c2 = pool.acquire()
+            >>> c3 = pool.acquire()
+            Traceback (most recent call last):
+              File "<stdin>", line 1, in <module>
+              File "kombu/connection.py", line 354, in acquire
+                raise ConnectionLimitExceeded(self.limit)
+            kombu.exceptions.ConnectionLimitExceeded: 2
+            >>> c1.release()
+            >>> c3 = pool.acquire()
+
+        """
+        return ConnectionPool(self, limit, preload)
+
+    def ChannelPool(self, limit=None, preload=None):
+        """Pool of channels.
+
+        See :class:`ChannelPool`.
+
+        :keyword limit: Maximum number of active channels.
+          Default is no limit.
+        :keyword preload: Number of channels to preload
+          when the pool is created.  Default is 0.
+
+        *Example usage*::
+
+            >>> connection = Connection('amqp://')
+            >>> pool = connection.ChannelPool(2)
+            >>> c1 = pool.acquire()
+            >>> c2 = pool.acquire()
+            >>> c3 = pool.acquire()
+            Traceback (most recent call last):
+              File "<stdin>", line 1, in <module>
+              File "kombu/connection.py", line 354, in acquire
+                raise ChannelLimitExceeded(self.limit)
+            kombu.connection.ChannelLimitExceeded: 2
+            >>> c1.release()
+            >>> c3 = pool.acquire()
+
+        """
+        return ChannelPool(self, limit, preload)
+
+    def Producer(self, channel=None, *args, **kwargs):
+        """Create new :class:`kombu.Producer` instance using this
+        connection."""
+        from .messaging import Producer
+        return Producer(channel or self, *args, **kwargs)
+
+    def Consumer(self, queues=None, channel=None, *args, **kwargs):
+        """Create new :class:`kombu.Consumer` instance using this
+        connection."""
+        from .messaging import Consumer
+        return Consumer(channel or self, queues, *args, **kwargs)
+
+    def SimpleQueue(self, name, no_ack=None, queue_opts=None,
+                    exchange_opts=None, channel=None, **kwargs):
+        """Create new :class:`~kombu.simple.SimpleQueue`, using a channel
+        from this connection.
+
+        If ``name`` is a string, a queue and exchange will be automatically
+        created using that name as the name of the queue and exchange,
+        and that name will also be used as the default routing key.
+
+        :param name: Name of the queue, or a :class:`~kombu.Queue` instance.
+        :keyword no_ack: Disable acknowledgements. Default is false.
+        :keyword queue_opts: Additional keyword arguments passed to the
+          constructor of the automatically created :class:`~kombu.Queue`.
+        :keyword exchange_opts: Additional keyword arguments passed to the
+          constructor of the automatically created :class:`~kombu.Exchange`.
+        :keyword channel: Custom channel to use.  If not specified the
+          connection default channel is used.
+
+        """
+        from .simple import SimpleQueue
+        return SimpleQueue(channel or self, name, no_ack, queue_opts,
+                           exchange_opts, **kwargs)
+
+    def SimpleBuffer(self, name, no_ack=None, queue_opts=None,
+                     exchange_opts=None, channel=None, **kwargs):
+        """Create new :class:`~kombu.simple.SimpleBuffer`, using a channel
+        from this connection.
+
+        Same as :meth:`SimpleQueue`, but configured with buffering
+        semantics.  The resulting queue and exchange will not be durable,
+        and auto-delete is enabled.  Messages will be transient (not
+        persistent), and acknowledgements are disabled (``no_ack``).
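+
+        Example, as a minimal sketch (the buffer name is illustrative)::
+
+            buf = connection.SimpleBuffer('my-buffer')
+            buf.put({'hello': 'world'})
+            message = buf.get(timeout=1)
+            print(message.payload)
+            buf.close()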
+ + """ + from .simple import SimpleBuffer + return SimpleBuffer(channel or self, name, no_ack, queue_opts, + exchange_opts, **kwargs) + + def _establish_connection(self): + self._debug('establishing connection...') + conn = self.transport.establish_connection() + self._debug('connection established: %r', conn) + return conn + + def __repr__(self): + """``x.__repr__() <==> repr(x)``""" + return ''.format(self.as_uri(), id(self)) + + def __copy__(self): + """``x.__copy__() <==> copy(x)``""" + return self.clone() + + def __reduce__(self): + return self.__class__, tuple(self.info().values()), None + + def __enter__(self): + return self + + def __exit__(self, *args): + self.release() + + @property + def qos_semantics_matches_spec(self): + return self.transport.qos_semantics_matches_spec(self.connection) + + @property + def connected(self): + """Return true if the connection has been established.""" + return (not self._closed and + self._connection is not None and + self.transport.verify_connection(self._connection)) + + @property + def connection(self): + """The underlying connection object. + + .. warning:: + This instance is transport specific, so do not + depend on the interface of this object. + + """ + if not self._closed: + if not self.connected: + self.declared_entities.clear() + self._default_channel = None + self._connection = self._establish_connection() + self._closed = False + return self._connection + + @property + def default_channel(self): + """Default channel, created upon access and closed when the connection + is closed. + + Can be used for automatic channel handling when you only need one + channel, and also it is the channel implicitly used if a connection + is passed instead of a channel, to functions that require a channel. + + """ + # make sure we're still connected, and if not refresh. + self.connection + if self._default_channel is None: + self._default_channel = self.channel() + return self._default_channel + + @property + def host(self): + """The host as a host name/port pair separated by colon.""" + return ':'.join([self.hostname, str(self.port)]) + + @property + def transport(self): + if self._transport is None: + self._transport = self.create_transport() + return self._transport + + @cached_property + def manager(self): + """Experimental manager that can be used to manage/monitor the broker + instance. Not available for all transports.""" + return self.transport.manager + + def get_manager(self, *args, **kwargs): + return self.transport.get_manager(*args, **kwargs) + + @cached_property + def recoverable_connection_errors(self): + """List of connection related exceptions that can be recovered from, + but where the connection must be closed and re-established first.""" + try: + return self.transport.recoverable_connection_errors + except AttributeError: + # There were no such classification before, + # and all errors were assumed to be recoverable, + # so this is a fallback for transports that do + # not support the new recoverable/irrecoverable classes. 
+            return self.connection_errors + self.channel_errors
+
+    @cached_property
+    def recoverable_channel_errors(self):
+        """List of channel related exceptions that can be automatically
+        recovered from without re-establishing the connection."""
+        try:
+            return self.transport.recoverable_channel_errors
+        except AttributeError:
+            return ()
+
+    @cached_property
+    def connection_errors(self):
+        """List of exceptions that may be raised by the connection."""
+        return self.transport.connection_errors
+
+    @cached_property
+    def channel_errors(self):
+        """List of exceptions that may be raised by the channel."""
+        return self.transport.channel_errors
+
+    @property
+    def supports_heartbeats(self):
+        return self.transport.supports_heartbeats
+
+    @property
+    def is_evented(self):
+        return self.transport.supports_ev
+BrokerConnection = Connection
+
+
+class Resource(object):
+    LimitExceeded = exceptions.LimitExceeded
+
+    def __init__(self, limit=None, preload=None):
+        self.limit = limit
+        self.preload = preload or 0
+        self._closed = False
+
+        self._resource = _LifoQueue()
+        self._dirty = set()
+        self.setup()
+
+    def setup(self):
+        raise NotImplementedError('subclass responsibility')
+
+    def _add_when_empty(self):
+        if self.limit and len(self._dirty) >= self.limit:
+            raise self.LimitExceeded(self.limit)
+        # All taken, put new on the queue and
+        # try get again, this way the first in line
+        # will get the resource.
+        self._resource.put_nowait(self.new())
+
+    def acquire(self, block=False, timeout=None):
+        """Acquire resource.
+
+        :keyword block: If the limit is exceeded,
+          block until there is an available item.
+        :keyword timeout: Timeout to wait
+          if ``block`` is true.  Default is :const:`None` (forever).
+
+        :raises LimitExceeded: if block is false
+          and the limit has been exceeded.
+
+        """
+        if self._closed:
+            raise RuntimeError('Acquire on closed pool')
+        if self.limit:
+            while 1:
+                try:
+                    R = self._resource.get(block=block, timeout=timeout)
+                except Empty:
+                    self._add_when_empty()
+                else:
+                    try:
+                        R = self.prepare(R)
+                    except BaseException:
+                        if isinstance(R, lazy):
+                            # not evaluated yet, just put it back
+                            self._resource.put_nowait(R)
+                        else:
+                            # evaluated, so we must try to
+                            # release/close it first.
+                            self.release(R)
+                        raise
+                    self._dirty.add(R)
+                    break
+        else:
+            R = self.prepare(self.new())
+
+        def release():
+            """Release resource so it can be used by another thread.
+
+            The caller is responsible for discarding the object,
+            and must not use the resource again.  A new resource must
+            be acquired if so needed.
+
+            """
+            self.release(R)
+        R.release = release
+
+        return R
+
+    def prepare(self, resource):
+        return resource
+
+    def close_resource(self, resource):
+        resource.close()
+
+    def release_resource(self, resource):
+        pass
+
+    def replace(self, resource):
+        """Replace resource with a new instance.  This can be used in case
+        of defective resources."""
+        if self.limit:
+            self._dirty.discard(resource)
+        self.close_resource(resource)
+
+    def release(self, resource):
+        if self.limit:
+            self._dirty.discard(resource)
+            self._resource.put_nowait(resource)
+            self.release_resource(resource)
+        else:
+            self.close_resource(resource)
+
+    def collect_resource(self, resource):
+        pass
+
+    def force_close_all(self):
+        """Close and remove all resources in the pool (also those in use).
+
+        Can be used to close resources from parent processes
+        after fork (e.g. sockets/connections).
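+
+        Example, as a sketch of cleanup in a forked child process
+        (``pool`` is an already created resource pool)::
+
+            import os
+
+            pid = os.fork()
+            if pid == 0:
+                # child: resources inherited from the parent
+                # cannot be used safely, so close them all.
+                pool.force_close_all()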
+ + """ + self._closed = True + dirty = self._dirty + resource = self._resource + while 1: # - acquired + try: + dres = dirty.pop() + except KeyError: + break + try: + self.collect_resource(dres) + except AttributeError: # Issue #78 + pass + while 1: # - available + # deque supports '.clear', but lists do not, so for that + # reason we use pop here, so that the underlying object can + # be any object supporting '.pop' and '.append'. + try: + res = resource.queue.pop() + except IndexError: + break + try: + self.collect_resource(res) + except AttributeError: + pass # Issue #78 + + if os.environ.get('KOMBU_DEBUG_POOL'): # pragma: no cover + _orig_acquire = acquire + _orig_release = release + + _next_resource_id = 0 + + def acquire(self, *args, **kwargs): # noqa + import traceback + id = self._next_resource_id = self._next_resource_id + 1 + print('+{0} ACQUIRE {1}'.format(id, self.__class__.__name__)) + r = self._orig_acquire(*args, **kwargs) + r._resource_id = id + print('-{0} ACQUIRE {1}'.format(id, self.__class__.__name__)) + if not hasattr(r, 'acquired_by'): + r.acquired_by = [] + r.acquired_by.append(traceback.format_stack()) + return r + + def release(self, resource): # noqa + id = resource._resource_id + print('+{0} RELEASE {1}'.format(id, self.__class__.__name__)) + r = self._orig_release(resource) + print('-{0} RELEASE {1}'.format(id, self.__class__.__name__)) + self._next_resource_id -= 1 + return r + + +class ConnectionPool(Resource): + LimitExceeded = exceptions.ConnectionLimitExceeded + + def __init__(self, connection, limit=None, preload=None): + self.connection = connection + super(ConnectionPool, self).__init__(limit=limit, + preload=preload) + + def new(self): + return self.connection.clone() + + def release_resource(self, resource): + try: + resource._debug('released') + except AttributeError: + pass + + def close_resource(self, resource): + resource._close() + + def collect_resource(self, resource, socket_timeout=0.1): + return resource.collect(socket_timeout) + + @contextmanager + def acquire_channel(self, block=False): + with self.acquire(block=block) as connection: + yield connection, connection.default_channel + + def setup(self): + if self.limit: + for i in range(self.limit): + if i < self.preload: + conn = self.new() + conn.connect() + else: + conn = lazy(self.new) + self._resource.put_nowait(conn) + + def prepare(self, resource): + if callable(resource): + resource = resource() + resource._debug('acquired') + return resource + + +class ChannelPool(Resource): + LimitExceeded = exceptions.ChannelLimitExceeded + + def __init__(self, connection, limit=None, preload=None): + self.connection = connection + super(ChannelPool, self).__init__(limit=limit, + preload=preload) + + def new(self): + return lazy(self.connection.channel) + + def setup(self): + channel = self.new() + if self.limit: + for i in range(self.limit): + self._resource.put_nowait( + i < self.preload and channel() or lazy(channel)) + + def prepare(self, channel): + if callable(channel): + channel = channel() + return channel + + +def maybe_channel(channel): + """Return the default channel if argument is a connection instance, + otherwise just return the channel given.""" + if isinstance(channel, Connection): + return channel.default_channel + return channel + + +def is_connection(obj): + return isinstance(obj, Connection) diff --git a/kombu/entity.py b/kombu/entity.py new file mode 100644 index 0000000..c27316c --- /dev/null +++ b/kombu/entity.py @@ -0,0 +1,718 @@ +""" +kombu.entity +================ + 
+Exchange and Queue declarations. + +""" +from __future__ import absolute_import + +from .abstract import MaybeChannelBound +from .exceptions import ContentDisallowed +from .serialization import prepare_accept_content + +TRANSIENT_DELIVERY_MODE = 1 +PERSISTENT_DELIVERY_MODE = 2 +DELIVERY_MODES = {'transient': TRANSIENT_DELIVERY_MODE, + 'persistent': PERSISTENT_DELIVERY_MODE} + +__all__ = ['Exchange', 'Queue', 'binding'] + + +def pretty_bindings(bindings): + return '[%s]' % (', '.join(map(str, bindings))) + + +class Exchange(MaybeChannelBound): + """An Exchange declaration. + + :keyword name: See :attr:`name`. + :keyword type: See :attr:`type`. + :keyword channel: See :attr:`channel`. + :keyword durable: See :attr:`durable`. + :keyword auto_delete: See :attr:`auto_delete`. + :keyword delivery_mode: See :attr:`delivery_mode`. + :keyword arguments: See :attr:`arguments`. + + .. attribute:: name + + Name of the exchange. Default is no name (the default exchange). + + .. attribute:: type + + *This description of AMQP exchange types was shamelessly stolen + from the blog post `AMQP in 10 minutes: Part 4`_ by + Rajith Attapattu. Reading this article is recommended if you're + new to amqp.* + + "AMQP defines four default exchange types (routing algorithms) that + covers most of the common messaging use cases. An AMQP broker can + also define additional exchange types, so see your broker + manual for more information about available exchange types. + + * `direct` (*default*) + + Direct match between the routing key in the message, and the + routing criteria used when a queue is bound to this exchange. + + * `topic` + + Wildcard match between the routing key and the routing pattern + specified in the exchange/queue binding. The routing key is + treated as zero or more words delimited by `"."` and + supports special wildcard characters. `"*"` matches a + single word and `"#"` matches zero or more words. + + * `fanout` + + Queues are bound to this exchange with no arguments. Hence any + message sent to this exchange will be forwarded to all queues + bound to this exchange. + + * `headers` + + Queues are bound to this exchange with a table of arguments + containing headers and values (optional). A special argument + named "x-match" determines the matching algorithm, where + `"all"` implies an `AND` (all pairs must match) and + `"any"` implies `OR` (at least one pair must match). + + :attr:`arguments` is used to specify the arguments. + + + .. _`AMQP in 10 minutes: Part 4`: + http://bit.ly/amqp-exchange-types + + .. attribute:: channel + + The channel the exchange is bound to (if bound). + + .. attribute:: durable + + Durable exchanges remain active when a server restarts. Non-durable + exchanges (transient exchanges) are purged when a server restarts. + Default is :const:`True`. + + .. attribute:: auto_delete + + If set, the exchange is deleted when all queues have finished + using it. Default is :const:`False`. + + .. attribute:: delivery_mode + + The default delivery mode used for messages. The value is an integer, + or alias string. + + * 1 or `"transient"` + + The message is transient. Which means it is stored in + memory only, and is lost if the server dies or restarts. + + * 2 or "persistent" (*default*) + The message is persistent. Which means the message is + stored both in-memory, and on disk, and therefore + preserved if the server dies or restarts. + + The default value is 2 (persistent). + + .. attribute:: arguments + + Additional arguments to specify when the exchange is declared. 
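+
+    *Example*, as a brief sketch (the exchange name is illustrative)::
+
+        >>> from kombu import Connection, Exchange
+
+        >>> conn = Connection('amqp://')
+        >>> media_exchange = Exchange('media', type='direct', durable=True)
+        >>> bound = media_exchange(conn.default_channel)
+        >>> bound.declare()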
+ + """ + TRANSIENT_DELIVERY_MODE = TRANSIENT_DELIVERY_MODE + PERSISTENT_DELIVERY_MODE = PERSISTENT_DELIVERY_MODE + + name = '' + type = 'direct' + durable = True + auto_delete = False + passive = False + delivery_mode = PERSISTENT_DELIVERY_MODE + + attrs = ( + ('name', None), + ('type', None), + ('arguments', None), + ('durable', bool), + ('passive', bool), + ('auto_delete', bool), + ('delivery_mode', lambda m: DELIVERY_MODES.get(m) or m), + ) + + def __init__(self, name='', type='', channel=None, **kwargs): + super(Exchange, self).__init__(**kwargs) + self.name = name or self.name + self.type = type or self.type + self.maybe_bind(channel) + + def __hash__(self): + return hash('E|%s' % (self.name, )) + + def declare(self, nowait=False, passive=None): + """Declare the exchange. + + Creates the exchange on the broker. + + :keyword nowait: If set the server will not respond, and a + response will not be waited for. Default is :const:`False`. + + """ + passive = self.passive if passive is None else passive + if self.name: + return self.channel.exchange_declare( + exchange=self.name, type=self.type, durable=self.durable, + auto_delete=self.auto_delete, arguments=self.arguments, + nowait=nowait, passive=passive, + ) + + def bind_to(self, exchange='', routing_key='', + arguments=None, nowait=False, **kwargs): + """Binds the exchange to another exchange. + + :keyword nowait: If set the server will not respond, and the call + will not block waiting for a response. Default is :const:`False`. + + """ + if isinstance(exchange, Exchange): + exchange = exchange.name + return self.channel.exchange_bind(destination=self.name, + source=exchange, + routing_key=routing_key, + nowait=nowait, + arguments=arguments) + + def unbind_from(self, source='', routing_key='', + nowait=False, arguments=None): + """Delete previously created exchange binding from the server.""" + if isinstance(source, Exchange): + source = source.name + return self.channel.exchange_unbind(destination=self.name, + source=source, + routing_key=routing_key, + nowait=nowait, + arguments=arguments) + + def Message(self, body, delivery_mode=None, priority=None, + content_type=None, content_encoding=None, + properties=None, headers=None): + """Create message instance to be sent with :meth:`publish`. + + :param body: Message body. + + :keyword delivery_mode: Set custom delivery mode. Defaults + to :attr:`delivery_mode`. + + :keyword priority: Message priority, 0 to 9. (currently not + supported by RabbitMQ). + + :keyword content_type: The messages content_type. If content_type + is set, no serialization occurs as it is assumed this is either + a binary object, or you've done your own serialization. + Leave blank if using built-in serialization as our library + properly sets content_type. + + :keyword content_encoding: The character set in which this object + is encoded. Use "binary" if sending in raw binary objects. + Leave blank if using built-in serialization as our library + properly sets content_encoding. + + :keyword properties: Message properties. + + :keyword headers: Message headers. 
+ + """ + properties = {} if properties is None else properties + dm = delivery_mode or self.delivery_mode + properties['delivery_mode'] = \ + DELIVERY_MODES[dm] if (dm != 2 and dm != 1) else dm + return self.channel.prepare_message(body, + properties=properties, + priority=priority, + content_type=content_type, + content_encoding=content_encoding, + headers=headers) + + def publish(self, message, routing_key=None, mandatory=False, + immediate=False, exchange=None): + """Publish message. + + :param message: :meth:`Message` instance to publish. + :param routing_key: Routing key. + :param mandatory: Currently not supported. + :param immediate: Currently not supported. + + """ + exchange = exchange or self.name + return self.channel.basic_publish(message, + exchange=exchange, + routing_key=routing_key, + mandatory=mandatory, + immediate=immediate) + + def delete(self, if_unused=False, nowait=False): + """Delete the exchange declaration on server. + + :keyword if_unused: Delete only if the exchange has no bindings. + Default is :const:`False`. + + :keyword nowait: If set the server will not respond, and a + response will not be waited for. Default is :const:`False`. + + """ + return self.channel.exchange_delete(exchange=self.name, + if_unused=if_unused, + nowait=nowait) + + def binding(self, routing_key='', arguments=None, unbind_arguments=None): + return binding(self, routing_key, arguments, unbind_arguments) + + def __eq__(self, other): + if isinstance(other, Exchange): + return (self.name == other.name and + self.type == other.type and + self.arguments == other.arguments and + self.durable == other.durable and + self.auto_delete == other.auto_delete and + self.delivery_mode == other.delivery_mode) + return NotImplemented + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return super(Exchange, self).__repr__(str(self)) + + def __str__(self): + return 'Exchange %s(%s)' % (self.name or repr(''), self.type) + + @property + def can_cache_declaration(self): + return not self.auto_delete + + +class binding(object): + """Represents a queue or exchange binding. + + :keyword exchange: Exchange to bind to. + :keyword routing_key: Routing key used as binding key. + :keyword arguments: Arguments for bind operation. + :keyword unbind_arguments: Arguments for unbind operation. + + """ + + def __init__(self, exchange=None, routing_key='', + arguments=None, unbind_arguments=None): + self.exchange = exchange + self.routing_key = routing_key + self.arguments = arguments + self.unbind_arguments = unbind_arguments + + def declare(self, channel, nowait=False): + """Declare destination exchange.""" + if self.exchange and self.exchange.name: + ex = self.exchange(channel) + ex.declare(nowait=nowait) + + def bind(self, entity, nowait=False): + """Bind entity to this binding.""" + entity.bind_to(exchange=self.exchange, + routing_key=self.routing_key, + arguments=self.arguments, + nowait=nowait) + + def unbind(self, entity, nowait=False): + """Unbind entity from this binding.""" + entity.unbind_from(self.exchange, + routing_key=self.routing_key, + arguments=self.unbind_arguments, + nowait=nowait) + + def __repr__(self): + return '' % (self, ) + + def __str__(self): + return '%s->%s' % (self.exchange.name, self.routing_key) + + +class Queue(MaybeChannelBound): + """A Queue declaration. + + :keyword name: See :attr:`name`. + :keyword exchange: See :attr:`exchange`. + :keyword routing_key: See :attr:`routing_key`. + :keyword channel: See :attr:`channel`. 
+ :keyword durable: See :attr:`durable`. + :keyword exclusive: See :attr:`exclusive`. + :keyword auto_delete: See :attr:`auto_delete`. + :keyword queue_arguments: See :attr:`queue_arguments`. + :keyword binding_arguments: See :attr:`binding_arguments`. + :keyword on_declared: See :attr:`on_declared` + + .. attribute:: name + + Name of the queue. Default is no name (default queue destination). + + .. attribute:: exchange + + The :class:`Exchange` the queue binds to. + + .. attribute:: routing_key + + The routing key (if any), also called *binding key*. + + The interpretation of the routing key depends on + the :attr:`Exchange.type`. + + * direct exchange + + Matches if the routing key property of the message and + the :attr:`routing_key` attribute are identical. + + * fanout exchange + + Always matches, even if the binding does not have a key. + + * topic exchange + + Matches the routing key property of the message by a primitive + pattern matching scheme. The message routing key then consists + of words separated by dots (`"."`, like domain names), and + two special characters are available; star (`"*"`) and hash + (`"#"`). The star matches any word, and the hash matches + zero or more words. For example `"*.stock.#"` matches the + routing keys `"usd.stock"` and `"eur.stock.db"` but not + `"stock.nasdaq"`. + + .. attribute:: channel + + The channel the Queue is bound to (if bound). + + .. attribute:: durable + + Durable queues remain active when a server restarts. + Non-durable queues (transient queues) are purged if/when + a server restarts. + Note that durable queues do not necessarily hold persistent + messages, although it does not make sense to send + persistent messages to a transient queue. + + Default is :const:`True`. + + .. attribute:: exclusive + + Exclusive queues may only be consumed from by the + current connection. Setting the 'exclusive' flag + always implies 'auto-delete'. + + Default is :const:`False`. + + .. attribute:: auto_delete + + If set, the queue is deleted when all consumers have + finished using it. Last consumer can be cancelled + either explicitly or because its channel is closed. If + there was no consumer ever on the queue, it won't be + deleted. + + .. attribute:: queue_arguments + + Additional arguments used when declaring the queue. + + .. attribute:: binding_arguments + + Additional arguments used when binding the queue. + + .. attribute:: alias + + Unused in Kombu, but applications can take advantage of this. + For example to give alternate names to queues with automatically + generated queue names. + + .. attribute:: on_declared + + Optional callback to be applied when the queue has been + declared (the ``queue_declare`` operation is complete). + This must be a function with a signature that accepts at least 3 + positional arguments: ``(name, messages, consumers)``. 
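+
+    *Example*, as a brief sketch (queue, exchange and routing key names
+    are illustrative)::
+
+        >>> from kombu import Exchange, Queue
+
+        >>> media_exchange = Exchange('media', 'direct', durable=True)
+        >>> video_queue = Queue('video', exchange=media_exchange,
+        ...                     routing_key='video')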
+ + """ + ContentDisallowed = ContentDisallowed + + name = '' + exchange = Exchange('') + routing_key = '' + + durable = True + exclusive = False + auto_delete = False + no_ack = False + + attrs = ( + ('name', None), + ('exchange', None), + ('routing_key', None), + ('queue_arguments', None), + ('binding_arguments', None), + ('durable', bool), + ('exclusive', bool), + ('auto_delete', bool), + ('no_ack', None), + ('alias', None), + ('bindings', list), + ) + + def __init__(self, name='', exchange=None, routing_key='', + channel=None, bindings=None, on_declared=None, + **kwargs): + super(Queue, self).__init__(**kwargs) + self.name = name or self.name + self.exchange = exchange or self.exchange + self.routing_key = routing_key or self.routing_key + self.bindings = set(bindings or []) + self.on_declared = on_declared + + # allows Queue('name', [binding(...), binding(...), ...]) + if isinstance(exchange, (list, tuple, set)): + self.bindings |= set(exchange) + if self.bindings: + self.exchange = None + + # exclusive implies auto-delete. + if self.exclusive: + self.auto_delete = True + self.maybe_bind(channel) + + def bind(self, channel): + on_declared = self.on_declared + bound = super(Queue, self).bind(channel) + bound.on_declared = on_declared + return bound + + def __hash__(self): + return hash('Q|%s' % (self.name, )) + + def when_bound(self): + if self.exchange: + self.exchange = self.exchange(self.channel) + + def declare(self, nowait=False): + """Declares the queue, the exchange and binds the queue to + the exchange.""" + # - declare main binding. + if self.exchange: + self.exchange.declare(nowait) + self.queue_declare(nowait, passive=False) + + if self.exchange and self.exchange.name: + self.queue_bind(nowait) + + # - declare extra/multi-bindings. + for B in self.bindings: + B.declare(self.channel) + B.bind(self, nowait=nowait) + return self.name + + def queue_declare(self, nowait=False, passive=False): + """Declare queue on the server. + + :keyword nowait: Do not wait for a reply. + :keyword passive: If set, the server will not create the queue. + The client can use this to check whether a queue exists + without modifying the server state. + + """ + ret = self.channel.queue_declare(queue=self.name, + passive=passive, + durable=self.durable, + exclusive=self.exclusive, + auto_delete=self.auto_delete, + arguments=self.queue_arguments, + nowait=nowait) + if not self.name: + self.name = ret[0] + if self.on_declared: + self.on_declared(*ret) + return ret + + def queue_bind(self, nowait=False): + """Create the queue binding on the server.""" + return self.bind_to(self.exchange, self.routing_key, + self.binding_arguments, nowait=nowait) + + def bind_to(self, exchange='', routing_key='', + arguments=None, nowait=False): + if isinstance(exchange, Exchange): + exchange = exchange.name + return self.channel.queue_bind(queue=self.name, + exchange=exchange, + routing_key=routing_key, + arguments=arguments, + nowait=nowait) + + def get(self, no_ack=None, accept=None): + """Poll the server for a new message. + + Must return the message if a message was available, + or :const:`None` otherwise. + + :keyword no_ack: If enabled the broker will automatically + ack messages. + :keyword accept: Custom list of accepted content types. + + This method provides direct access to the messages in a + queue using a synchronous dialogue, designed for + specific types of applications where synchronous functionality + is more important than performance. 
+ + """ + no_ack = self.no_ack if no_ack is None else no_ack + message = self.channel.basic_get(queue=self.name, no_ack=no_ack) + if message is not None: + m2p = getattr(self.channel, 'message_to_python', None) + if m2p: + message = m2p(message) + if message.errors: + message._reraise_error() + message.accept = prepare_accept_content(accept) + return message + + def purge(self, nowait=False): + """Remove all ready messages from the queue.""" + return self.channel.queue_purge(queue=self.name, + nowait=nowait) or 0 + + def consume(self, consumer_tag='', callback=None, + no_ack=None, nowait=False): + """Start a queue consumer. + + Consumers last as long as the channel they were created on, or + until the client cancels them. + + :keyword consumer_tag: Unique identifier for the consumer. The + consumer tag is local to a connection, so two clients + can use the same consumer tags. If this field is empty + the server will generate a unique tag. + + :keyword no_ack: If enabled the broker will automatically ack + messages. + + :keyword nowait: Do not wait for a reply. + + :keyword callback: callback called for each delivered message + + """ + if no_ack is None: + no_ack = self.no_ack + return self.channel.basic_consume(queue=self.name, + no_ack=no_ack, + consumer_tag=consumer_tag or '', + callback=callback, + nowait=nowait) + + def cancel(self, consumer_tag): + """Cancel a consumer by consumer tag.""" + return self.channel.basic_cancel(consumer_tag) + + def delete(self, if_unused=False, if_empty=False, nowait=False): + """Delete the queue. + + :keyword if_unused: If set, the server will only delete the queue + if it has no consumers. A channel error will be raised + if the queue has consumers. + + :keyword if_empty: If set, the server will only delete the queue + if it is empty. If it is not empty a channel error will be raised. + + :keyword nowait: Do not wait for a reply. 
+ + """ + return self.channel.queue_delete(queue=self.name, + if_unused=if_unused, + if_empty=if_empty, + nowait=nowait) + + def queue_unbind(self, arguments=None, nowait=False): + return self.unbind_from(self.exchange, self.routing_key, + arguments, nowait) + + def unbind_from(self, exchange='', routing_key='', + arguments=None, nowait=False): + """Unbind queue by deleting the binding from the server.""" + return self.channel.queue_unbind(queue=self.name, + exchange=exchange.name, + routing_key=routing_key, + arguments=arguments, + nowait=nowait) + + def __eq__(self, other): + if isinstance(other, Queue): + return (self.name == other.name and + self.exchange == other.exchange and + self.routing_key == other.routing_key and + self.queue_arguments == other.queue_arguments and + self.binding_arguments == other.binding_arguments and + self.durable == other.durable and + self.exclusive == other.exclusive and + self.auto_delete == other.auto_delete) + return NotImplemented + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + s = super(Queue, self).__repr__ + if self.bindings: + return s('Queue {0.name} -> {bindings}'.format( + self, bindings=pretty_bindings(self.bindings), + )) + return s( + 'Queue {0.name} -> {0.exchange!r} -> {0.routing_key}'.format( + self)) + + @property + def can_cache_declaration(self): + return not self.auto_delete + + @classmethod + def from_dict(self, queue, **options): + binding_key = options.get('binding_key') or options.get('routing_key') + + e_durable = options.get('exchange_durable') + if e_durable is None: + e_durable = options.get('durable') + + e_auto_delete = options.get('exchange_auto_delete') + if e_auto_delete is None: + e_auto_delete = options.get('auto_delete') + + q_durable = options.get('queue_durable') + if q_durable is None: + q_durable = options.get('durable') + + q_auto_delete = options.get('queue_auto_delete') + if q_auto_delete is None: + q_auto_delete = options.get('auto_delete') + + e_arguments = options.get('exchange_arguments') + q_arguments = options.get('queue_arguments') + b_arguments = options.get('binding_arguments') + bindings = options.get('bindings') + + exchange = Exchange(options.get('exchange'), + type=options.get('exchange_type'), + delivery_mode=options.get('delivery_mode'), + routing_key=options.get('routing_key'), + durable=e_durable, + auto_delete=e_auto_delete, + arguments=e_arguments) + return Queue(queue, + exchange=exchange, + routing_key=binding_key, + durable=q_durable, + exclusive=options.get('exclusive'), + auto_delete=q_auto_delete, + no_ack=options.get('no_ack'), + queue_arguments=q_arguments, + binding_arguments=b_arguments, + bindings=bindings) diff --git a/kombu/exceptions.py b/kombu/exceptions.py new file mode 100644 index 0000000..716bc69 --- /dev/null +++ b/kombu/exceptions.py @@ -0,0 +1,83 @@ +""" +kombu.exceptions +================ + +Exceptions. 
+ +""" +from __future__ import absolute_import + +import socket + +from amqp import ChannelError, ConnectionError, ResourceError + +__all__ = ['NotBoundError', 'MessageStateError', 'TimeoutError', + 'LimitExceeded', 'ConnectionLimitExceeded', + 'ChannelLimitExceeded', 'ConnectionError', 'ChannelError', + 'VersionMismatch', 'SerializerNotInstalled', 'ResourceError', + 'SerializationError', 'EncodeError', 'DecodeError'] + +TimeoutError = socket.timeout + + +class KombuError(Exception): + """Common subclass for all Kombu exceptions.""" + pass + + +class SerializationError(KombuError): + """Failed to serialize/deserialize content.""" + + +class EncodeError(SerializationError): + """Cannot encode object.""" + pass + + +class DecodeError(SerializationError): + """Cannot decode object.""" + + +class NotBoundError(KombuError): + """Trying to call channel dependent method on unbound entity.""" + pass + + +class MessageStateError(KombuError): + """The message has already been acknowledged.""" + pass + + +class LimitExceeded(KombuError): + """Limit exceeded.""" + pass + + +class ConnectionLimitExceeded(LimitExceeded): + """Maximum number of simultaneous connections exceeded.""" + pass + + +class ChannelLimitExceeded(LimitExceeded): + """Maximum number of simultaneous channels exceeded.""" + pass + + +class VersionMismatch(KombuError): + pass + + +class SerializerNotInstalled(KombuError): + """Support for the requested serialization type is not installed""" + pass + + +class ContentDisallowed(SerializerNotInstalled): + """Consumer does not allow this content-type.""" + pass + + +class InconsistencyError(ConnectionError): + """Data or environment has been found to be inconsistent, + depending on the cause it may be possible to retry the operation.""" + pass diff --git a/kombu/five.py b/kombu/five.py new file mode 100644 index 0000000..87abc09 --- /dev/null +++ b/kombu/five.py @@ -0,0 +1,203 @@ +# -*- coding: utf-8 -*- +""" + celery.five + ~~~~~~~~~~~ + + Compatibility implementations of features + only available in newer Python versions. 
+ + +""" +from __future__ import absolute_import + +# ############# py3k ######################################################### +import sys +PY3 = sys.version_info[0] == 3 + +try: + reload = reload # noqa +except NameError: # pragma: no cover + from imp import reload # noqa + +try: + from collections import UserList # noqa +except ImportError: # pragma: no cover + from UserList import UserList # noqa + +try: + from collections import UserDict # noqa +except ImportError: # pragma: no cover + from UserDict import UserDict # noqa + +try: + bytes_t = bytes +except NameError: # pragma: no cover + bytes_t = str # noqa + +# ############# time.monotonic ############################################### + +if sys.version_info < (3, 3): + + import platform + SYSTEM = platform.system() + + if SYSTEM == 'Darwin': + import ctypes + from ctypes.util import find_library + libSystem = ctypes.CDLL('libSystem.dylib') + CoreServices = ctypes.CDLL(find_library('CoreServices'), + use_errno=True) + mach_absolute_time = libSystem.mach_absolute_time + mach_absolute_time.restype = ctypes.c_uint64 + absolute_to_nanoseconds = CoreServices.AbsoluteToNanoseconds + absolute_to_nanoseconds.restype = ctypes.c_uint64 + absolute_to_nanoseconds.argtypes = [ctypes.c_uint64] + + def _monotonic(): + return absolute_to_nanoseconds(mach_absolute_time()) * 1e-9 + + elif SYSTEM == 'Linux': + # from stackoverflow: + # questions/1205722/how-do-i-get-monotonic-time-durations-in-python + import ctypes + import os + + CLOCK_MONOTONIC = 1 # see + + class timespec(ctypes.Structure): + _fields_ = [ + ('tv_sec', ctypes.c_long), + ('tv_nsec', ctypes.c_long), + ] + + librt = ctypes.CDLL('librt.so.1', use_errno=True) + clock_gettime = librt.clock_gettime + clock_gettime.argtypes = [ + ctypes.c_int, ctypes.POINTER(timespec), + ] + + def _monotonic(): # noqa + t = timespec() + if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(t)) != 0: + errno_ = ctypes.get_errno() + raise OSError(errno_, os.strerror(errno_)) + return t.tv_sec + t.tv_nsec * 1e-9 + else: + from time import time as _monotonic +try: + from time import monotonic +except ImportError: + monotonic = _monotonic # noqa + +# ############# Py3 <-> Py2 ################################################## + +if PY3: # pragma: no cover + import builtins + + from queue import Queue, Empty, Full, LifoQueue + from itertools import zip_longest + from io import StringIO, BytesIO + + map = map + zip = zip + string = str + string_t = str + long_t = int + text_t = str + range = range + module_name_t = str + + open_fqdn = 'builtins.open' + + def items(d): + return d.items() + + def keys(d): + return d.keys() + + def values(d): + return d.values() + + def nextfun(it): + return it.__next__ + + exec_ = getattr(builtins, 'exec') + + def reraise(tp, value, tb=None): + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + + class WhateverIO(StringIO): + + def write(self, data): + if isinstance(data, bytes): + data = data.encode() + StringIO.write(self, data) + +else: + import __builtin__ as builtins # noqa + from Queue import Queue, Empty, Full, LifoQueue # noqa + from itertools import ( # noqa + imap as map, + izip as zip, + izip_longest as zip_longest, + ) + try: + from cStringIO import StringIO # noqa + except ImportError: # pragma: no cover + from StringIO import StringIO # noqa + + string = unicode # noqa + string_t = basestring # noqa + text_t = unicode + long_t = long # noqa + range = xrange + module_name_t = str + + open_fqdn = '__builtin__.open' + + def items(d): # noqa 
+        return d.iteritems()
+
+    def keys(d):  # noqa
+        return d.iterkeys()
+
+    def values(d):  # noqa
+        return d.itervalues()
+
+    def nextfun(it):  # noqa
+        return it.next
+
+    def exec_(code, globs=None, locs=None):  # pragma: no cover
+        """Execute code in a namespace."""
+        if globs is None:
+            frame = sys._getframe(1)
+            globs = frame.f_globals
+            if locs is None:
+                locs = frame.f_locals
+            del frame
+        elif locs is None:
+            locs = globs
+        exec("""exec code in globs, locs""")
+
+    exec_("""def reraise(tp, value, tb=None): raise tp, value, tb""")
+
+    BytesIO = WhateverIO = StringIO  # noqa
+
+
+def with_metaclass(Type, skip_attrs=set(['__dict__', '__weakref__'])):
+    """Class decorator to set metaclass.
+
+    Works with both Python 2 and Python 3, and it does not add
+    an extra class in the lookup order like ``six.with_metaclass`` does
+    (that is -- it copies the original class instead of using inheritance).
+
+    """
+
+    def _clone_with_metaclass(Class):
+        attrs = dict((key, value) for key, value in items(vars(Class))
+                     if key not in skip_attrs)
+        return Type(Class.__name__, Class.__bases__, attrs)
+
+    return _clone_with_metaclass
diff --git a/kombu/log.py b/kombu/log.py
new file mode 100644
index 0000000..2a7db9b
--- /dev/null
+++ b/kombu/log.py
@@ -0,0 +1,147 @@
+from __future__ import absolute_import
+
+import logging
+import numbers
+import os
+import sys
+
+from logging.handlers import WatchedFileHandler
+
+from .five import string_t
+from .utils import cached_property
+from .utils.encoding import safe_repr, safe_str
+from .utils.functional import maybe_evaluate
+
+__all__ = ['LogMixin', 'LOG_LEVELS', 'get_loglevel', 'setup_logging']
+
+try:
+    LOG_LEVELS = dict(logging._nameToLevel)
+    LOG_LEVELS.update(logging._levelToName)
+except AttributeError:
+    LOG_LEVELS = dict(logging._levelNames)
+LOG_LEVELS.setdefault('FATAL', logging.FATAL)
+LOG_LEVELS.setdefault(logging.FATAL, 'FATAL')
+DISABLE_TRACEBACKS = os.environ.get('DISABLE_TRACEBACKS')
+
+
+class NullHandler(logging.Handler):
+
+    def emit(self, record):
+        pass
+
+
+def get_logger(logger):
+    if isinstance(logger, string_t):
+        logger = logging.getLogger(logger)
+    if not logger.handlers:
+        logger.addHandler(NullHandler())
+    return logger
+
+
+def get_loglevel(level):
+    if isinstance(level, string_t):
+        return LOG_LEVELS[level]
+    return level
+
+
+def naive_format_parts(fmt):
+    parts = fmt.split('%')
+    for i, e in enumerate(parts[1:]):
+        yield None if not e or not parts[i - 1] else e[0]
+
+
+def safeify_format(fmt, args,
+                   filters={'s': safe_str,
+                            'r': safe_repr}):
+    for index, type in enumerate(naive_format_parts(fmt)):
+        filt = filters.get(type)
+        yield filt(args[index]) if filt else args[index]
+
+
+class LogMixin(object):
+
+    def debug(self, *args, **kwargs):
+        return self.log(logging.DEBUG, *args, **kwargs)
+
+    def info(self, *args, **kwargs):
+        return self.log(logging.INFO, *args, **kwargs)
+
+    def warn(self, *args, **kwargs):
+        return self.log(logging.WARN, *args, **kwargs)
+
+    def error(self, *args, **kwargs):
+        return self._error(logging.ERROR, *args, **kwargs)
+
+    def critical(self, *args, **kwargs):
+        return self._error(logging.CRITICAL, *args, **kwargs)
+
+    def _error(self, severity, *args, **kwargs):
+        kwargs.setdefault('exc_info', True)
+        if DISABLE_TRACEBACKS:
+            kwargs.pop('exc_info', None)
+        return self.log(severity, *args, **kwargs)
+
+    def annotate(self, text):
+        return '%s - %s' % (self.logger_name, text)
+
+    def log(self, severity, *args, **kwargs):
+        if self.logger.isEnabledFor(severity):
+            log = self.logger.log
+            if len(args) > 1 and isinstance(args[0], string_t):
isinstance(args[0], string_t): + expand = [maybe_evaluate(arg) for arg in args[1:]] + return log(severity, + self.annotate(args[0].replace('%r', '%s')), + *list(safeify_format(args[0], expand)), **kwargs) + else: + return self.logger.log( + severity, self.annotate(' '.join(map(safe_str, args))), + **kwargs) + + def get_logger(self): + return get_logger(self.logger_name) + + def is_enabled_for(self, level): + return self.logger.isEnabledFor(self.get_loglevel(level)) + + def get_loglevel(self, level): + if not isinstance(level, numbers.Integral): + return LOG_LEVELS[level] + return level + + @cached_property + def logger(self): + return self.get_logger() + + @property + def logger_name(self): + return self.__class__.__name__ + + +class Log(LogMixin): + + def __init__(self, name, logger=None): + self._logger_name = name + self._logger = logger + + def get_logger(self): + if self._logger: + return self._logger + return LogMixin.get_logger(self) + + @property + def logger_name(self): + return self._logger_name + + +def setup_logging(loglevel=None, logfile=None): + logger = logging.getLogger() + loglevel = get_loglevel(loglevel or 'ERROR') + logfile = logfile if logfile else sys.__stderr__ + if not logger.handlers: + if hasattr(logfile, 'write'): + handler = logging.StreamHandler(logfile) + else: + handler = WatchedFileHandler(logfile) + logger.addHandler(handler) + logger.setLevel(loglevel) + return logger diff --git a/kombu/message.py b/kombu/message.py new file mode 100644 index 0000000..5f7ae52 --- /dev/null +++ b/kombu/message.py @@ -0,0 +1,154 @@ +""" +kombu.transport.message +======================= + +Message class. + +""" +from __future__ import absolute_import + +import sys + +from .compression import decompress +from .exceptions import MessageStateError +from .five import reraise, text_t +from .serialization import loads + +ACK_STATES = frozenset(['ACK', 'REJECTED', 'REQUEUED']) + + +class Message(object): + """Base class for received messages.""" + __slots__ = ('_state', 'channel', 'delivery_tag', + 'content_type', 'content_encoding', + 'delivery_info', 'headers', 'properties', + 'body', '_decoded_cache', 'accept', '__dict__') + MessageStateError = MessageStateError + + errors = None + + def __init__(self, channel, body=None, delivery_tag=None, + content_type=None, content_encoding=None, delivery_info={}, + properties=None, headers=None, postencode=None, + accept=None, **kwargs): + self.errors = [] if self.errors is None else self.errors + self.channel = channel + self.delivery_tag = delivery_tag + self.content_type = content_type + self.content_encoding = content_encoding + self.delivery_info = delivery_info + self.headers = headers or {} + self.properties = properties or {} + self._decoded_cache = None + self._state = 'RECEIVED' + self.accept = accept + + compression = self.headers.get('compression') + if not self.errors and compression: + try: + body = decompress(body, compression) + except Exception: + self.errors.append(sys.exc_info()) + + if not self.errors and postencode and isinstance(body, text_t): + try: + body = body.encode(postencode) + except Exception: + self.errors.append(sys.exc_info()) + self.body = body + + def _reraise_error(self, callback=None): + try: + reraise(*self.errors[0]) + except Exception as exc: + if not callback: + raise + callback(self, exc) + + def ack(self): + """Acknowledge this message as being processed. + This will remove the message from the queue. + + :raises MessageStateError: If the message has already been + acknowledged/requeued/rejected. 
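+ + Example (a minimal sketch; ``consumer`` is assumed to be a + :class:`~kombu.messaging.Consumer` bound to an open channel, and + ``process`` a user-supplied handler):: + + def on_message(body, message): + process(body) + message.ack() + + consumer.register_callback(on_message)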
+ + """ + if self.channel.no_ack_consumers is not None: + try: + consumer_tag = self.delivery_info['consumer_tag'] + except KeyError: + pass + else: + if consumer_tag in self.channel.no_ack_consumers: + return + if self.acknowledged: + raise self.MessageStateError( + 'Message already acknowledged with state: {0._state}'.format( + self)) + self.channel.basic_ack(self.delivery_tag) + self._state = 'ACK' + + def ack_log_error(self, logger, errors): + try: + self.ack() + except errors as exc: + logger.critical("Couldn't ack %r, reason:%r", + self.delivery_tag, exc, exc_info=True) + + def reject_log_error(self, logger, errors, requeue=False): + try: + self.reject(requeue=requeue) + except errors as exc: + logger.critical("Couldn't reject %r, reason: %r", + self.delivery_tag, exc, exc_info=True) + + def reject(self, requeue=False): + """Reject this message. + + The message will be discarded by the server. + + :raises MessageStateError: If the message has already been + acknowledged/requeued/rejected. + + """ + if self.acknowledged: + raise self.MessageStateError( + 'Message already acknowledged with state: {0._state}'.format( + self)) + self.channel.basic_reject(self.delivery_tag, requeue=requeue) + self._state = 'REJECTED' + + def requeue(self): + """Reject this message and put it back on the queue. + + You must not use this method as a means of selecting messages + to process. + + :raises MessageStateError: If the message has already been + acknowledged/requeued/rejected. + + """ + if self.acknowledged: + raise self.MessageStateError( + 'Message already acknowledged with state: {0._state}'.format( + self)) + self.channel.basic_reject(self.delivery_tag, requeue=True) + self._state = 'REQUEUED' + + def decode(self): + """Deserialize the message body, returning the original + python structure sent by the publisher.""" + return loads(self.body, self.content_type, + self.content_encoding, accept=self.accept) + + @property + def acknowledged(self): + """Set to true if the message has been acknowledged.""" + return self._state in ACK_STATES + + @property + def payload(self): + """The decoded message body.""" + if not self._decoded_cache: + self._decoded_cache = self.decode() + return self._decoded_cache diff --git a/kombu/messaging.py b/kombu/messaging.py new file mode 100644 index 0000000..8b92395 --- /dev/null +++ b/kombu/messaging.py @@ -0,0 +1,602 @@ +""" +kombu.messaging +=============== + +Sending and receiving messages. + +""" +from __future__ import absolute_import + +import numbers + +from itertools import count + +from .common import maybe_declare +from .compression import compress +from .connection import maybe_channel, is_connection +from .entity import Exchange, Queue, DELIVERY_MODES +from .exceptions import ContentDisallowed +from .five import text_t, values +from .serialization import dumps, prepare_accept_content +from .utils import ChannelPromise, maybe_list + +__all__ = ['Exchange', 'Queue', 'Producer', 'Consumer'] + + +class Producer(object): + """Message Producer. + + :param channel: Connection or channel. + :keyword exchange: Optional default exchange. + :keyword routing_key: Optional default routing key. + :keyword serializer: Default serializer. Default is `"json"`. + :keyword compression: Default compression method. Default is no + compression. + :keyword auto_declare: Automatically declare the default exchange + at instantiation. Default is :const:`True`. 
+ :keyword on_return: Callback to call for undeliverable messages, + when the `mandatory` or `immediate` arguments to + :meth:`publish` are used. This callback needs the following + signature: `(exception, exchange, routing_key, message)`. + Note that the producer needs to drain events to use this feature. + + """ + + #: Default exchange + exchange = None + + #: Default routing key. + routing_key = '' + + #: Default serializer to use. Default is JSON. + serializer = None + + #: Default compression method. Disabled by default. + compression = None + + #: By default the exchange is declared at instantiation. + #: If you want to declare manually then you can set this + #: to :const:`False`. + auto_declare = True + + #: Basic return callback. + on_return = None + + #: Set if channel argument was a Connection instance (using + #: default_channel). + __connection__ = None + + def __init__(self, channel, exchange=None, routing_key=None, + serializer=None, auto_declare=None, compression=None, + on_return=None): + self._channel = channel + self.exchange = exchange + self.routing_key = routing_key or self.routing_key + self.serializer = serializer or self.serializer + self.compression = compression or self.compression + self.on_return = on_return or self.on_return + self._channel_promise = None + if self.exchange is None: + self.exchange = Exchange('') + if auto_declare is not None: + self.auto_declare = auto_declare + + if self._channel: + self.revive(self._channel) + + def __repr__(self): + return '<Producer: {0._channel}>'.format(self) + + def __reduce__(self): + return self.__class__, self.__reduce_args__() + + def __reduce_args__(self): + return (None, self.exchange, self.routing_key, self.serializer, + self.auto_declare, self.compression) + + def declare(self): + """Declare the exchange. + + This happens automatically at instantiation if + :attr:`auto_declare` is enabled. + + """ + if self.exchange.name: + self.exchange.declare() + + def maybe_declare(self, entity, retry=False, **retry_policy): + """Declare the exchange if it hasn't already been declared + during this session.""" + if entity: + return maybe_declare(entity, self.channel, retry, **retry_policy) + + def publish(self, body, routing_key=None, delivery_mode=None, + mandatory=False, immediate=False, priority=0, + content_type=None, content_encoding=None, serializer=None, + headers=None, compression=None, exchange=None, retry=False, + retry_policy=None, declare=[], **properties): + """Publish message to the specified exchange. + + :param body: Message body. + :keyword routing_key: Message routing key. + :keyword delivery_mode: See :attr:`delivery_mode`. + :keyword mandatory: Currently not supported. + :keyword immediate: Currently not supported. + :keyword priority: Message priority. A number between 0 and 9. + :keyword content_type: Content type. Default is auto-detect. + :keyword content_encoding: Content encoding. Default is auto-detect. + :keyword serializer: Serializer to use. Default is auto-detect. + :keyword compression: Compression method to use. Default is none. + :keyword headers: Mapping of arbitrary headers to pass along + with the message body. + :keyword exchange: Override the exchange. Note that this exchange + must have been declared. + :keyword declare: Optional list of required entities that must + have been declared before publishing the message. The entities + will be declared using :func:`~kombu.common.maybe_declare`. + :keyword retry: Retry publishing, or declaring entities if the + connection is lost. 
+ :keyword retry_policy: Retry configuration; these are the keyword + arguments supported by :meth:`~kombu.Connection.ensure`. + :keyword \*\*properties: Additional message properties, see AMQP spec. + + """ + headers = {} if headers is None else headers + retry_policy = {} if retry_policy is None else retry_policy + routing_key = self.routing_key if routing_key is None else routing_key + compression = self.compression if compression is None else compression + exchange = exchange or self.exchange + + if isinstance(exchange, Exchange): + delivery_mode = delivery_mode or exchange.delivery_mode + exchange = exchange.name + else: + delivery_mode = delivery_mode or self.exchange.delivery_mode + if not isinstance(delivery_mode, numbers.Integral): + delivery_mode = DELIVERY_MODES[delivery_mode] + properties['delivery_mode'] = delivery_mode + + body, content_type, content_encoding = self._prepare( + body, serializer, content_type, content_encoding, + compression, headers) + + publish = self._publish + if retry: + publish = self.connection.ensure(self, publish, **retry_policy) + return publish(body, priority, content_type, + content_encoding, headers, properties, + routing_key, mandatory, immediate, exchange, declare) + + def _publish(self, body, priority, content_type, content_encoding, + headers, properties, routing_key, mandatory, + immediate, exchange, declare): + channel = self.channel + message = channel.prepare_message( + body, priority, content_type, + content_encoding, headers, properties, + ) + if declare: + maybe_declare = self.maybe_declare + [maybe_declare(entity) for entity in declare] + return channel.basic_publish( + message, + exchange=exchange, routing_key=routing_key, + mandatory=mandatory, immediate=immediate, + ) + + def _get_channel(self): + channel = self._channel + if isinstance(channel, ChannelPromise): + channel = self._channel = channel() + self.exchange.revive(channel) + if self.on_return: + channel.events['basic_return'].add(self.on_return) + return channel + + def _set_channel(self, channel): + self._channel = channel + channel = property(_get_channel, _set_channel) + + def revive(self, channel): + """Revive the producer after connection loss.""" + if is_connection(channel): + connection = channel + self.__connection__ = connection + channel = ChannelPromise(lambda: connection.default_channel) + if isinstance(channel, ChannelPromise): + self._channel = channel + self.exchange = self.exchange(channel) + else: + # Channel already concrete + self._channel = channel + if self.on_return: + self._channel.events['basic_return'].add(self.on_return) + self.exchange = self.exchange(channel) + if self.auto_declare: + # auto_declare is not recommended as this will force + # evaluation of the channel. + self.declare() + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.release() + + def release(self): + pass + close = release + + def _prepare(self, body, serializer=None, content_type=None, + content_encoding=None, compression=None, headers=None): + + # No content_type? Then we're serializing the data internally. + if not content_type: + serializer = serializer or self.serializer + (content_type, content_encoding, + body) = dumps(body, serializer=serializer) + else: + # If the programmer doesn't want us to serialize, + # make sure content_encoding is set. + if isinstance(body, text_t): + if not content_encoding: + content_encoding = 'utf-8' + body = body.encode(content_encoding) + + # If they passed in a string, we can't know anything + # about it. 
So assume it's binary data. + elif not content_encoding: + content_encoding = 'binary' + + if compression: + body, headers['compression'] = compress(body, compression) + + return body, content_type, content_encoding + + @property + def connection(self): + try: + return self.__connection__ or self.channel.connection.client + except AttributeError: + pass + + +class Consumer(object): + """Message consumer. + + :param channel: see :attr:`channel`. + :param queues: see :attr:`queues`. + :keyword no_ack: see :attr:`no_ack`. + :keyword auto_declare: see :attr:`auto_declare` + :keyword callbacks: see :attr:`callbacks`. + :keyword on_message: See :attr:`on_message` + :keyword on_decode_error: see :attr:`on_decode_error`. + + """ + ContentDisallowed = ContentDisallowed + + #: The connection/channel to use for this consumer. + channel = None + + #: A single :class:`~kombu.Queue`, or a list of queues to + #: consume from. + queues = None + + #: Flag for automatic message acknowledgment. + #: If enabled the messages are automatically acknowledged by the + #: broker. This can increase performance but means that you + #: have no control of when the message is removed. + #: + #: Disabled by default. + no_ack = None + + #: By default all entities will be declared at instantiation, if you + #: want to handle this manually you can set this to :const:`False`. + auto_declare = True + + #: List of callbacks called in order when a message is received. + #: + #: The signature of the callbacks must take two arguments: + #: `(body, message)`, which is the decoded message body and + #: the `Message` instance (a subclass of + #: :class:`~kombu.transport.base.Message`). + callbacks = None + + #: Optional function called whenever a message is received. + #: + #: When defined this function will be called instead of the + #: :meth:`receive` method, and :attr:`callbacks` will be disabled. + #: + #: So this can be used as an alternative to :attr:`callbacks` when + #: you don't want the body to be automatically decoded. + #: Note that the message will still be decompressed if the message + #: has the ``compression`` header set. + #: + #: The signature of the callback must take a single argument, + #: which is the raw message object (a subclass of + #: :class:`~kombu.transport.base.Message`). + #: + #: Also note that the ``message.body`` attribute, which is the raw + #: contents of the message body, may in some cases be a read-only + #: :class:`buffer` object. + on_message = None + + #: Callback called when a message can't be decoded. + #: + #: The signature of the callback must take two arguments: `(message, + #: exc)`, which is the message that can't be decoded and the exception + #: that occurred while trying to decode it. + on_decode_error = None + + #: List of accepted content-types. + #: + #: An exception will be raised if the consumer receives + #: a message with an untrusted content type. + #: By default all content-types are accepted, but not if + #: :func:`kombu.disable_untrusted_serializers` was called, + #: in which case only json is allowed. 
+ accept = None + + _tags = count(1) # global + + def __init__(self, channel, queues=None, no_ack=None, auto_declare=None, + callbacks=None, on_decode_error=None, on_message=None, + accept=None): + self.channel = channel + self.queues = self.queues or [] if queues is None else queues + self.no_ack = self.no_ack if no_ack is None else no_ack + self.callbacks = (self.callbacks or [] if callbacks is None + else callbacks) + self.on_message = on_message + self._active_tags = {} + if auto_declare is not None: + self.auto_declare = auto_declare + if on_decode_error is not None: + self.on_decode_error = on_decode_error + self.accept = prepare_accept_content(accept) + + if self.channel: + self.revive(self.channel) + + def revive(self, channel): + """Revive consumer after connection loss.""" + self._active_tags.clear() + channel = self.channel = maybe_channel(channel) + self.queues = [queue(self.channel) + for queue in maybe_list(self.queues)] + for queue in self.queues: + queue.revive(channel) + + if self.auto_declare: + self.declare() + + def declare(self): + """Declare queues, exchanges and bindings. + + This is done automatically at instantiation if :attr:`auto_declare` + is set. + + """ + for queue in self.queues: + queue.declare() + + def register_callback(self, callback): + """Register a new callback to be called when a message + is received. + + The signature of the callback needs to accept two arguments: + `(body, message)`, which is the decoded message body + and the `Message` instance (a subclass of + :class:`~kombu.transport.base.Message`). + + """ + self.callbacks.append(callback) + + def __enter__(self): + self.consume() + return self + + def __exit__(self, *exc_info): + try: + self.cancel() + except Exception: + pass + + def add_queue(self, queue): + """Add a queue to the list of queues to consume from. + + This will not start consuming from the queue, + for that you will have to call :meth:`consume` after. + + """ + queue = queue(self.channel) + if self.auto_declare: + queue.declare() + self.queues.append(queue) + return queue + + def add_queue_from_dict(self, queue, **options): + """This method is deprecated. + + Instead please use:: + + consumer.add_queue(Queue.from_dict(d)) + + """ + return self.add_queue(Queue.from_dict(queue, **options)) + + def consume(self, no_ack=None): + """Start consuming messages. + + Can be called multiple times, but note that while it + will consume from new queues added since the last call, + it will not cancel consuming from removed queues ( + use :meth:`cancel_by_queue`). + + :param no_ack: See :attr:`no_ack`. + + """ + if self.queues: + no_ack = self.no_ack if no_ack is None else no_ack + + H, T = self.queues[:-1], self.queues[-1] + for queue in H: + self._basic_consume(queue, no_ack=no_ack, nowait=True) + self._basic_consume(T, no_ack=no_ack, nowait=False) + + def cancel(self): + """End all active queue consumers. + + This does not affect already delivered messages, but it does + mean the server will not send any more messages for this consumer. 
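+ + Example (a sketch; ``conn`` is assumed to be a connected + :class:`~kombu.Connection`, ``queue`` a declared + :class:`~kombu.Queue`, and ``handle_message`` a user callback):: + + consumer = Consumer(conn, [queue], callbacks=[handle_message]) + consumer.consume() + try: + conn.drain_events(timeout=1) + finally: + consumer.cancel()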
+ + """ + cancel = self.channel.basic_cancel + for tag in values(self._active_tags): + cancel(tag) + self._active_tags.clear() + close = cancel + + def cancel_by_queue(self, queue): + """Cancel consumer by queue name.""" + try: + tag = self._active_tags.pop(queue) + except KeyError: + pass + else: + self.queues[:] = [q for q in self.queues if q.name != queue] + self.channel.basic_cancel(tag) + + def consuming_from(self, queue): + """Return :const:`True` if the consumer is currently + consuming from queue'.""" + name = queue + if isinstance(queue, Queue): + name = queue.name + return name in self._active_tags + + def purge(self): + """Purge messages from all queues. + + .. warning:: + This will *delete all ready messages*, there is no + undo operation. + + """ + return sum(queue.purge() for queue in self.queues) + + def flow(self, active): + """Enable/disable flow from peer. + + This is a simple flow-control mechanism that a peer can use + to avoid overflowing its queues or otherwise finding itself + receiving more messages than it can process. + + The peer that receives a request to stop sending content + will finish sending the current content (if any), and then wait + until flow is reactivated. + + """ + self.channel.flow(active) + + def qos(self, prefetch_size=0, prefetch_count=0, apply_global=False): + """Specify quality of service. + + The client can request that messages should be sent in + advance so that when the client finishes processing a message, + the following message is already held locally, rather than needing + to be sent down the channel. Prefetching gives a performance + improvement. + + The prefetch window is Ignored if the :attr:`no_ack` option is set. + + :param prefetch_size: Specify the prefetch window in octets. + The server will send a message in advance if it is equal to + or smaller in size than the available prefetch size (and + also falls within other prefetch limits). May be set to zero, + meaning "no specific limit", although other prefetch limits + may still apply. + + :param prefetch_count: Specify the prefetch window in terms of + whole messages. + + :param apply_global: Apply new settings globally on all channels. + + """ + return self.channel.basic_qos(prefetch_size, + prefetch_count, + apply_global) + + def recover(self, requeue=False): + """Redeliver unacknowledged messages. + + Asks the broker to redeliver all unacknowledged messages + on the specified channel. + + :keyword requeue: By default the messages will be redelivered + to the original recipient. With `requeue` set to true, the + server will attempt to requeue the message, potentially then + delivering it to an alternative subscriber. + + """ + return self.channel.basic_recover(requeue=requeue) + + def receive(self, body, message): + """Method called when a message is received. + + This dispatches to the registered :attr:`callbacks`. + + :param body: The decoded message body. + :param message: The `Message` instance. + + :raises NotImplementedError: If no consumer callbacks have been + registered. 
+ + """ + callbacks = self.callbacks + if not callbacks: + raise NotImplementedError('Consumer does not have any callbacks') + [callback(body, message) for callback in callbacks] + + def _basic_consume(self, queue, consumer_tag=None, + no_ack=no_ack, nowait=True): + tag = self._active_tags.get(queue.name) + if tag is None: + tag = self._add_tag(queue, consumer_tag) + queue.consume(tag, self._receive_callback, + no_ack=no_ack, nowait=nowait) + return tag + + def _add_tag(self, queue, consumer_tag=None): + tag = consumer_tag or str(next(self._tags)) + self._active_tags[queue.name] = tag + return tag + + def _receive_callback(self, message): + accept = self.accept + on_m, channel, decoded = self.on_message, self.channel, None + try: + m2p = getattr(channel, 'message_to_python', None) + if m2p: + message = m2p(message) + if accept is not None: + message.accept = accept + if message.errors: + return message._reraise_error(self.on_decode_error) + decoded = None if on_m else message.decode() + except Exception as exc: + if not self.on_decode_error: + raise + self.on_decode_error(message, exc) + else: + return on_m(message) if on_m else self.receive(decoded, message) + + def __repr__(self): + return ''.format(self) + + @property + def connection(self): + try: + return self.channel.connection.client + except AttributeError: + pass diff --git a/kombu/mixins.py b/kombu/mixins.py new file mode 100644 index 0000000..081ae58 --- /dev/null +++ b/kombu/mixins.py @@ -0,0 +1,251 @@ +# -*- coding: utf-8 -*- +""" +kombu.mixins +============ + +Useful mixin classes. + +""" +from __future__ import absolute_import + +import socket + +from contextlib import contextmanager +from functools import partial +from itertools import count +from time import sleep + +from .common import ignore_errors +from .five import range +from .messaging import Consumer +from .log import get_logger +from .utils import cached_property, nested +from .utils.encoding import safe_repr +from .utils.limits import TokenBucket + +__all__ = ['ConsumerMixin'] + +logger = get_logger(__name__) +debug, info, warn, error = logger.debug, logger.info, logger.warn, logger.error + + +class ConsumerMixin(object): + """Convenience mixin for implementing consumer programs. + + It can be used outside of threads, with threads, or greenthreads + (eventlet/gevent) too. + + The basic class would need a :attr:`connection` attribute + which must be a :class:`~kombu.Connection` instance, + and define a :meth:`get_consumers` method that returns a list + of :class:`kombu.Consumer` instances to use. + Supporting multiple consumers is important so that multiple + channels can be used for different QoS requirements. + + **Example**: + + .. code-block:: python + + + class Worker(ConsumerMixin): + task_queue = Queue('tasks', Exchange('tasks'), 'tasks')) + + def __init__(self, connection): + self.connection = None + + def get_consumers(self, Consumer, channel): + return [Consumer(queues=[self.task_queue], + callback=[self.on_task])] + + def on_task(self, body, message): + print('Got task: {0!r}'.format(body)) + message.ack() + + **Additional handler methods**: + + * :meth:`extra_context` + + Optional extra context manager that will be entered + after the connection and consumers have been set up. + + Takes arguments ``(connection, channel)``. + + * :meth:`on_connection_error` + + Handler called if the connection is lost/ or + is unavailable. + + Takes arguments ``(exc, interval)``, where interval + is the time in seconds when the connection will be retried. 
+ + The default handler will log the exception. + + * :meth:`on_connection_revived` + + Handler called as soon as the connection is re-established + after connection failure. + + Takes no arguments. + + * :meth:`on_consume_ready` + + Handler called when the consumer is ready to accept + messages. + + Takes arguments ``(connection, channel, consumers)``. + Also keyword arguments to ``consume`` are forwarded + to this handler. + + * :meth:`on_consume_end` + + Handler called after the consumers are cancelled. + Takes arguments ``(connection, channel)``. + + * :meth:`on_iteration` + + Handler called for every iteration while draining + events. + + Takes no arguments. + + * :meth:`on_decode_error` + + Handler called if a consumer was unable to decode + the body of a message. + + Takes arguments ``(message, exc)`` where message is the + original message object. + + The default handler will log the error and + acknowledge the message, so if you override make + sure to call super, or perform these steps yourself. + + """ + + #: maximum number of retries trying to re-establish the connection, + #: if the connection is lost/unavailable. + connect_max_retries = None + + #: When this is set to true the consumer should stop consuming + #: and return, so that it can be joined if it is the implementation + #: of a thread. + should_stop = False + + def get_consumers(self, Consumer, channel): + raise NotImplementedError('Subclass responsibility') + + def on_connection_revived(self): + pass + + def on_consume_ready(self, connection, channel, consumers, **kwargs): + pass + + def on_consume_end(self, connection, channel): + pass + + def on_iteration(self): + pass + + def on_decode_error(self, message, exc): + error("Can't decode message body: %r (type:%r encoding:%r raw:%r)", + exc, message.content_type, message.content_encoding, + safe_repr(message.body)) + message.ack() + + def on_connection_error(self, exc, interval): + warn('Broker connection error: %r. ' + 'Trying again in %s seconds.', exc, interval) + + @contextmanager + def extra_context(self, connection, channel): + yield + + def run(self, _tokens=1): + restart_limit = self.restart_limit + errors = (self.connection.connection_errors + + self.connection.channel_errors) + while not self.should_stop: + try: + if restart_limit.can_consume(_tokens): + for _ in self.consume(limit=None): # pragma: no cover + pass + else: + sleep(restart_limit.expected_time(_tokens)) + except errors: + warn('Connection to broker lost. 
' 'Trying to re-establish the connection...') + + @contextmanager + def consumer_context(self, **kwargs): + with self.Consumer() as (connection, channel, consumers): + with self.extra_context(connection, channel): + self.on_consume_ready(connection, channel, consumers, **kwargs) + yield connection, channel, consumers + + def consume(self, limit=None, timeout=None, safety_interval=1, **kwargs): + elapsed = 0 + with self.consumer_context(**kwargs) as (conn, channel, consumers): + for i in limit and range(limit) or count(): + if self.should_stop: + break + self.on_iteration() + try: + conn.drain_events(timeout=safety_interval) + except socket.timeout: + conn.heartbeat_check() + elapsed += safety_interval + if timeout and elapsed >= timeout: + raise + except socket.error: + if not self.should_stop: + raise + else: + yield + elapsed = 0 + debug('consume exiting') + + def maybe_conn_error(self, fun): + """Use :func:`kombu.common.ignore_errors` instead.""" + return ignore_errors(self, fun) + + def create_connection(self): + return self.connection.clone() + + @contextmanager + def establish_connection(self): + with self.create_connection() as conn: + conn.ensure_connection(self.on_connection_error, + self.connect_max_retries) + yield conn + + @contextmanager + def Consumer(self): + with self.establish_connection() as conn: + self.on_connection_revived() + info('Connected to %s', conn.as_uri()) + channel = conn.default_channel + cls = partial(Consumer, channel, + on_decode_error=self.on_decode_error) + with self._consume_from(*self.get_consumers(cls, channel)) as c: + yield conn, channel, c + debug('Consumers cancelled') + self.on_consume_end(conn, channel) + debug('Connection closed') + + def _consume_from(self, *consumers): + return nested(*consumers) + + @cached_property + def restart_limit(self): + # the AttributeError that can be caught from amqplib + # poses problems for the too-frequent-restarts protection + # in Connection.ensure_connection + return TokenBucket(1) + + @cached_property + def connection_errors(self): + return self.connection.connection_errors + + @cached_property + def channel_errors(self): + return self.connection.channel_errors diff --git a/kombu/pidbox.py b/kombu/pidbox.py new file mode 100644 index 0000000..3a31311 --- /dev/null +++ b/kombu/pidbox.py @@ -0,0 +1,364 @@ +""" +kombu.pidbox +=============== + +Generic process mailbox. + +""" +from __future__ import absolute_import + +import socket +import warnings + +from collections import defaultdict, deque +from copy import copy +from itertools import count +from threading import local +from time import time + +from . import Exchange, Queue, Consumer, Producer +from .clocks import LamportClock +from .common import maybe_declare, oid_from +from .exceptions import InconsistencyError +from .five import range +from .log import get_logger +from .utils import cached_property, kwdict, uuid, reprcall + +REPLY_QUEUE_EXPIRES = 10 + +W_PIDBOX_IN_USE = """\ +A node named {node.hostname} is already using this process mailbox! + +Maybe you forgot to shut down the other node or did not do so properly? +Or if you meant to start multiple nodes on the same host please make sure +you give each node a unique node name! +""" + +__all__ = ['Node', 'Mailbox'] +logger = get_logger(__name__) +debug, error = logger.debug, logger.error + + +class Node(object): + + #: hostname of the node. + hostname = None + + #: the :class:`Mailbox` this is a node for. + mailbox = None + + #: map of method name/handlers. 
+ handlers = None + + #: current context (passed on to handlers) + state = None + + #: current channel. + channel = None + + def __init__(self, hostname, state=None, channel=None, + handlers=None, mailbox=None): + self.channel = channel + self.mailbox = mailbox + self.hostname = hostname + self.state = state + self.adjust_clock = self.mailbox.clock.adjust + if handlers is None: + handlers = {} + self.handlers = handlers + + def Consumer(self, channel=None, no_ack=True, accept=None, **options): + queue = self.mailbox.get_queue(self.hostname) + + def verify_exclusive(name, messages, consumers): + if consumers: + warnings.warn(W_PIDBOX_IN_USE.format(node=self)) + queue.on_declared = verify_exclusive + + return Consumer( + channel or self.channel, [queue], no_ack=no_ack, + accept=self.mailbox.accept if accept is None else accept, + **options + ) + + def handler(self, fun): + self.handlers[fun.__name__] = fun + return fun + + def on_decode_error(self, message, exc): + error('Cannot decode message: %r', exc, exc_info=1) + + def listen(self, channel=None, callback=None): + consumer = self.Consumer(channel=channel, + callbacks=[callback or self.handle_message], + on_decode_error=self.on_decode_error) + consumer.consume() + return consumer + + def dispatch(self, method, arguments=None, + reply_to=None, ticket=None, **kwargs): + arguments = arguments or {} + debug('pidbox received method %s [reply_to:%s ticket:%s]', + reprcall(method, (), kwargs=arguments), reply_to, ticket) + handle = reply_to and self.handle_call or self.handle_cast + try: + reply = handle(method, kwdict(arguments)) + except SystemExit: + raise + except Exception as exc: + error('pidbox command error: %r', exc, exc_info=1) + reply = {'error': repr(exc)} + + if reply_to: + self.reply({self.hostname: reply}, + exchange=reply_to['exchange'], + routing_key=reply_to['routing_key'], + ticket=ticket) + return reply + + def handle(self, method, arguments={}): + return self.handlers[method](self.state, **arguments) + + def handle_call(self, method, arguments): + return self.handle(method, arguments) + + def handle_cast(self, method, arguments): + return self.handle(method, arguments) + + def handle_message(self, body, message=None): + destination = body.get('destination') + if message: + self.adjust_clock(message.headers.get('clock') or 0) + if not destination or self.hostname in destination: + return self.dispatch(**kwdict(body)) + dispatch_from_message = handle_message + + def reply(self, data, exchange, routing_key, ticket, **kwargs): + self.mailbox._publish_reply(data, exchange, routing_key, ticket, + channel=self.channel, + serializer=self.mailbox.serializer) + + +class Mailbox(object): + node_cls = Node + exchange_fmt = '%s.pidbox' + reply_exchange_fmt = 'reply.%s.pidbox' + + #: Name of application. + namespace = None + + #: Connection (if bound). + connection = None + + #: Exchange type (usually direct, or fanout for broadcast). + type = 'direct' + + #: mailbox exchange (init by constructor). + exchange = None + + #: exchange to send replies to. + reply_exchange = None + + #: Only accepts json messages by default. 
+ accept = ['json'] + + #: Message serializer + serializer = None + + def __init__(self, namespace, + type='direct', connection=None, clock=None, + accept=None, serializer=None): + self.namespace = namespace + self.connection = connection + self.type = type + self.clock = LamportClock() if clock is None else clock + self.exchange = self._get_exchange(self.namespace, self.type) + self.reply_exchange = self._get_reply_exchange(self.namespace) + self._tls = local() + self.unclaimed = defaultdict(deque) + self.accept = self.accept if accept is None else accept + self.serializer = self.serializer if serializer is None else serializer + + def __call__(self, connection): + bound = copy(self) + bound.connection = connection + return bound + + def Node(self, hostname=None, state=None, channel=None, handlers=None): + hostname = hostname or socket.gethostname() + return self.node_cls(hostname, state, channel, handlers, mailbox=self) + + def call(self, destination, command, kwargs={}, + timeout=None, callback=None, channel=None): + return self._broadcast(command, kwargs, destination, + reply=True, timeout=timeout, + callback=callback, + channel=channel) + + def cast(self, destination, command, kwargs={}): + return self._broadcast(command, kwargs, destination, reply=False) + + def abcast(self, command, kwargs={}): + return self._broadcast(command, kwargs, reply=False) + + def multi_call(self, command, kwargs={}, timeout=1, + limit=None, callback=None, channel=None): + return self._broadcast(command, kwargs, reply=True, + timeout=timeout, limit=limit, + callback=callback, + channel=channel) + + def get_reply_queue(self): + oid = self.oid + return Queue( + '%s.%s' % (oid, self.reply_exchange.name), + exchange=self.reply_exchange, + routing_key=oid, + durable=False, + auto_delete=True, + queue_arguments={'x-expires': int(REPLY_QUEUE_EXPIRES * 1000)}, + ) + + @cached_property + def reply_queue(self): + return self.get_reply_queue() + + def get_queue(self, hostname): + return Queue('%s.%s.pidbox' % (hostname, self.namespace), + exchange=self.exchange, + durable=False, + auto_delete=True) + + def _publish_reply(self, reply, exchange, routing_key, ticket, + channel=None, **opts): + chan = channel or self.connection.default_channel + exchange = Exchange(exchange, exchange_type='direct', + delivery_mode='transient', + durable=False) + producer = Producer(chan, auto_declare=False) + try: + producer.publish( + reply, exchange=exchange, routing_key=routing_key, + declare=[exchange], headers={ + 'ticket': ticket, 'clock': self.clock.forward(), + }, + **opts + ) + except InconsistencyError: + pass # queue probably deleted and no one is expecting a reply. 
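+ + # Example round trip (a sketch, assuming ``connection`` is an + # established :class:`~kombu.Connection` and a remote node has + # registered a ``ping`` handler): + # + # mailbox = Mailbox('myapp', type='fanout')(connection) + # replies = mailbox.multi_call('ping', {}, timeout=1)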
+ + def _publish(self, type, arguments, destination=None, + reply_ticket=None, channel=None, timeout=None, + serializer=None): + message = {'method': type, + 'arguments': arguments, + 'destination': destination} + chan = channel or self.connection.default_channel + exchange = self.exchange + if reply_ticket: + maybe_declare(self.reply_queue(channel)) + message.update(ticket=reply_ticket, + reply_to={'exchange': self.reply_exchange.name, + 'routing_key': self.oid}) + serializer = serializer or self.serializer + producer = Producer(chan, auto_declare=False) + producer.publish( + message, exchange=exchange.name, declare=[exchange], + headers={'clock': self.clock.forward(), + 'expires': time() + timeout if timeout else 0}, + serializer=serializer, + ) + + def _broadcast(self, command, arguments=None, destination=None, + reply=False, timeout=1, limit=None, + callback=None, channel=None, serializer=None): + if destination is not None and \ + not isinstance(destination, (list, tuple)): + raise ValueError( + 'destination must be a list/tuple not {0}'.format( + type(destination))) + + arguments = arguments or {} + reply_ticket = reply and uuid() or None + chan = channel or self.connection.default_channel + + # Set reply limit to number of destinations (if specified) + if limit is None and destination: + limit = destination and len(destination) or None + + serializer = serializer or self.serializer + self._publish(command, arguments, destination=destination, + reply_ticket=reply_ticket, + channel=chan, + timeout=timeout, + serializer=serializer) + + if reply_ticket: + return self._collect(reply_ticket, limit=limit, + timeout=timeout, + callback=callback, + channel=chan) + + def _collect(self, ticket, + limit=None, timeout=1, callback=None, + channel=None, accept=None): + if accept is None: + accept = self.accept + chan = channel or self.connection.default_channel + queue = self.reply_queue + consumer = Consumer(chan, [queue], accept=accept, no_ack=True) + responses = [] + unclaimed = self.unclaimed + adjust_clock = self.clock.adjust + + try: + return unclaimed.pop(ticket) + except KeyError: + pass + + def on_message(body, message): + # ticket header added in kombu 2.5 + header = message.headers.get + adjust_clock(header('clock') or 0) + expires = header('expires') + if expires and time() > expires: + return + this_id = header('ticket', ticket) + if this_id == ticket: + if callback: + callback(body) + responses.append(body) + else: + unclaimed[this_id].append(body) + + consumer.register_callback(on_message) + try: + with consumer: + for i in limit and range(limit) or count(): + try: + self.connection.drain_events(timeout=timeout) + except socket.timeout: + break + return responses + finally: + chan.after_reply_message_received(queue.name) + + def _get_exchange(self, namespace, type): + return Exchange(self.exchange_fmt % namespace, + type=type, + durable=False, + delivery_mode='transient') + + def _get_reply_exchange(self, namespace): + return Exchange(self.reply_exchange_fmt % namespace, + type='direct', + durable=False, + delivery_mode='transient') + + @cached_property + def oid(self): + try: + return self._tls.OID + except AttributeError: + oid = self._tls.OID = oid_from(self) + return oid diff --git a/kombu/pools.py b/kombu/pools.py new file mode 100644 index 0000000..4d075e6 --- /dev/null +++ b/kombu/pools.py @@ -0,0 +1,153 @@ +""" +kombu.pools +=========== + +Public resource pools. 
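+ +Example (a sketch; assumes a broker is reachable at the example URL):: + + from kombu import Connection + from kombu.pools import producers + + connection = Connection('amqp://guest:guest@localhost//') + with producers[connection].acquire(block=True) as producer: + producer.publish({'hello': 'world'}, routing_key='hello')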
+ +""" +from __future__ import absolute_import + +import os + +from itertools import chain + +from .connection import Resource +from .five import range, values +from .messaging import Producer +from .utils import EqualityDict +from .utils.functional import lazy + +__all__ = ['ProducerPool', 'PoolGroup', 'register_group', + 'connections', 'producers', 'get_limit', 'set_limit', 'reset'] +_limit = [200] +_used = [False] +_groups = [] +use_global_limit = object() +disable_limit_protection = os.environ.get('KOMBU_DISABLE_LIMIT_PROTECTION') + + +class ProducerPool(Resource): + Producer = Producer + + def __init__(self, connections, *args, **kwargs): + self.connections = connections + self.Producer = kwargs.pop('Producer', None) or self.Producer + super(ProducerPool, self).__init__(*args, **kwargs) + + def _acquire_connection(self): + return self.connections.acquire(block=True) + + def create_producer(self): + conn = self._acquire_connection() + try: + return self.Producer(conn) + except BaseException: + conn.release() + raise + + def new(self): + return lazy(self.create_producer) + + def setup(self): + if self.limit: + for _ in range(self.limit): + self._resource.put_nowait(self.new()) + + def close_resource(self, resource): + pass + + def prepare(self, p): + if callable(p): + p = p() + if p._channel is None: + conn = self._acquire_connection() + try: + p.revive(conn) + except BaseException: + conn.release() + raise + return p + + def release(self, resource): + if resource.__connection__: + resource.__connection__.release() + resource.channel = None + super(ProducerPool, self).release(resource) + + +class PoolGroup(EqualityDict): + + def __init__(self, limit=None): + self.limit = limit + + def create(self, resource, limit): + raise NotImplementedError('PoolGroups must define ``create``') + + def __missing__(self, resource): + limit = self.limit + if limit is use_global_limit: + limit = get_limit() + if not _used[0]: + _used[0] = True + k = self[resource] = self.create(resource, limit) + return k + + +def register_group(group): + _groups.append(group) + return group + + +class Connections(PoolGroup): + + def create(self, connection, limit): + return connection.Pool(limit=limit) +connections = register_group(Connections(limit=use_global_limit)) + + +class Producers(PoolGroup): + + def create(self, connection, limit): + return ProducerPool(connections[connection], limit=limit) +producers = register_group(Producers(limit=use_global_limit)) + + +def _all_pools(): + return chain(*[(values(g) if g else iter([])) for g in _groups]) + + +def get_limit(): + return _limit[0] + + +def set_limit(limit, force=False, reset_after=False): + limit = limit or 0 + glimit = _limit[0] or 0 + if limit < glimit: + if not disable_limit_protection and (_used[0] and not force): + raise RuntimeError("Can't lower limit after pool in use.") + reset_after = True + if limit != glimit: + _limit[0] = limit + for pool in _all_pools(): + pool.limit = limit + if reset_after: + reset() + return limit + + +def reset(*args, **kwargs): + for pool in _all_pools(): + try: + pool.force_close_all() + except Exception: + pass + for group in _groups: + group.clear() + _used[0] = False + +try: + from multiprocessing.util import register_after_fork + register_after_fork(connections, reset) +except ImportError: # pragma: no cover + pass diff --git a/kombu/serialization.py b/kombu/serialization.py new file mode 100644 index 0000000..47b8f91 --- /dev/null +++ b/kombu/serialization.py @@ -0,0 +1,455 @@ +""" +kombu.serialization 
+=================== + +Serialization utilities. + +""" +from __future__ import absolute_import + +import codecs +import os +import sys + +import pickle as pypickle +try: + import cPickle as cpickle +except ImportError: # pragma: no cover + cpickle = None # noqa + +from collections import namedtuple +from contextlib import contextmanager + +from .exceptions import ( + ContentDisallowed, DecodeError, EncodeError, SerializerNotInstalled +) +from .five import BytesIO, reraise, text_t +from .utils import entrypoints +from .utils.encoding import str_to_bytes, bytes_t + +__all__ = ['pickle', 'loads', 'dumps', 'register', 'unregister'] +SKIP_DECODE = frozenset(['binary', 'ascii-8bit']) +TRUSTED_CONTENT = frozenset(['application/data', 'application/text']) + +if sys.platform.startswith('java'): # pragma: no cover + + def _decode(t, coding): + return codecs.getdecoder(coding)(t)[0] +else: + _decode = codecs.decode + +pickle = cpickle or pypickle +pickle_load = pickle.load + +#: Kombu requires Python 2.5 or later so we use protocol 2 by default. +#: There's a new protocol (3) but this is only supported by Python 3. +pickle_protocol = int(os.environ.get('PICKLE_PROTOCOL', 2)) + +codec = namedtuple('codec', ('content_type', 'content_encoding', 'encoder')) + + +@contextmanager +def _reraise_errors(wrapper, + include=(Exception, ), exclude=(SerializerNotInstalled, )): + try: + yield + except exclude: + raise + except include as exc: + reraise(wrapper, wrapper(exc), sys.exc_info()[2]) + + +def pickle_loads(s, load=pickle_load): + # used to support buffer objects + return load(BytesIO(s)) + + +def parenthesize_alias(first, second): + return '%s (%s)' % (first, second) if first else second + + +class SerializerRegistry(object): + """The registry keeps track of serialization methods.""" + + def __init__(self): + self._encoders = {} + self._decoders = {} + self._default_encode = None + self._default_content_type = None + self._default_content_encoding = None + self._disabled_content_types = set() + self.type_to_name = {} + self.name_to_type = {} + + def register(self, name, encoder, decoder, content_type, + content_encoding='utf-8'): + if encoder: + self._encoders[name] = codec( + content_type, content_encoding, encoder, + ) + if decoder: + self._decoders[content_type] = decoder + self.type_to_name[content_type] = name + self.name_to_type[name] = content_type + + def enable(self, name): + if '/' not in name: + name = self.name_to_type[name] + self._disabled_content_types.discard(name) + + def disable(self, name): + if '/' not in name: + name = self.name_to_type[name] + self._disabled_content_types.add(name) + + def unregister(self, name): + try: + content_type = self.name_to_type[name] + self._decoders.pop(content_type, None) + self._encoders.pop(name, None) + self.type_to_name.pop(content_type, None) + self.name_to_type.pop(name, None) + except KeyError: + raise SerializerNotInstalled( + 'No encoder/decoder installed for {0}'.format(name)) + + def _set_default_serializer(self, name): + """ + Set the default serialization method used by this library. + + :param name: The name of the registered serialization method. + For example, `json` (default), `pickle`, `yaml`, `msgpack`, + or any custom methods registered using :meth:`register`. + + :raises SerializerNotInstalled: If the serialization method + requested is not available. 
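+ + Example (a sketch; ``registry`` is the module-level + :class:`SerializerRegistry` instance defined below):: + + registry._set_default_serializer('pickle')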
+ """ + try: + (self._default_content_type, self._default_content_encoding, + self._default_encode) = self._encoders[name] + except KeyError: + raise SerializerNotInstalled( + 'No encoder installed for {0}'.format(name)) + + def dumps(self, data, serializer=None): + if serializer == 'raw': + return raw_encode(data) + if serializer and not self._encoders.get(serializer): + raise SerializerNotInstalled( + 'No encoder installed for {0}'.format(serializer)) + + # If a raw string was sent, assume binary encoding + # (it's likely either ASCII or a raw binary file, and a character + # set of 'binary' will encompass both, even if not ideal. + if not serializer and isinstance(data, bytes_t): + # In Python 3+, this would be "bytes"; allow binary data to be + # sent as a message without getting encoder errors + return 'application/data', 'binary', data + + # For Unicode objects, force it into a string + if not serializer and isinstance(data, text_t): + with _reraise_errors(EncodeError, exclude=()): + payload = data.encode('utf-8') + return 'text/plain', 'utf-8', payload + + if serializer: + content_type, content_encoding, encoder = \ + self._encoders[serializer] + else: + encoder = self._default_encode + content_type = self._default_content_type + content_encoding = self._default_content_encoding + + with _reraise_errors(EncodeError): + payload = encoder(data) + return content_type, content_encoding, payload + encode = dumps # XXX compat + + def loads(self, data, content_type, content_encoding, + accept=None, force=False, _trusted_content=TRUSTED_CONTENT): + content_type = content_type or 'application/data' + if accept is not None: + if content_type not in _trusted_content \ + and content_type not in accept: + raise self._for_untrusted_content(content_type, 'untrusted') + else: + if content_type in self._disabled_content_types and not force: + raise self._for_untrusted_content(content_type, 'disabled') + content_encoding = (content_encoding or 'utf-8').lower() + + if data: + decode = self._decoders.get(content_type) + if decode: + with _reraise_errors(DecodeError): + return decode(data) + if content_encoding not in SKIP_DECODE and \ + not isinstance(data, text_t): + with _reraise_errors(DecodeError): + return _decode(data, content_encoding) + return data + decode = loads # XXX compat + + def _for_untrusted_content(self, ctype, why): + return ContentDisallowed( + 'Refusing to deserialize {0} content of type {1}'.format( + why, + parenthesize_alias(self.type_to_name.get(ctype, ctype), ctype), + ), + ) + + +#: Global registry of serializers/deserializers. +registry = SerializerRegistry() + + +""" +.. function:: dumps(data, serializer=default_serializer) + + Serialize a data structure into a string suitable for sending + as an AMQP message body. + + :param data: The message data to send. Can be a list, + dictionary or a string. + + :keyword serializer: An optional string representing + the serialization method you want the data marshalled + into. (For example, `json`, `raw`, or `pickle`). + + If :const:`None` (default), then json will be used, unless + `data` is a :class:`str` or :class:`unicode` object. In this + latter case, no serialization occurs as it would be + unnecessary. + + Note that if `serializer` is specified, then that + serialization method will be used even if a :class:`str` + or :class:`unicode` object is passed in. + + :returns: A three-item tuple containing the content type + (e.g., `application/json`), content encoding, (e.g., + `utf-8`) and a string containing the serialized + data. 
+ + :raises SerializerNotInstalled: If the serialization method + requested is not available. +""" +dumps = encode = registry.encode # XXX encode is a compat alias + +""" +.. function:: loads(data, content_type, content_encoding): + + Deserialize a data stream as serialized using `dumps` + based on `content_type`. + + :param data: The message data to deserialize. + + :param content_type: The content-type of the data. + (e.g., `application/json`). + + :param content_encoding: The content-encoding of the data. + (e.g., `utf-8`, `binary`, or `us-ascii`). + + :returns: The unserialized data. + +""" +loads = decode = registry.decode # XXX decode is a compat alias + + +""" +.. function:: register(name, encoder, decoder, content_type, + content_encoding='utf-8'): + Register a new encoder/decoder. + + :param name: A convenience name for the serialization method. + + :param encoder: A method that will be passed a python data structure + and should return a string representing the serialized data. + If :const:`None`, then only a decoder will be registered. Encoding + will not be possible. + + :param decoder: A method that will be passed a string representing + serialized data and should return a python data structure. + If :const:`None`, then only an encoder will be registered. + Decoding will not be possible. + + :param content_type: The mime-type describing the serialized + structure. + + :param content_encoding: The content encoding (character set) that + the `decoder` method will be returning. Will usually be + `utf-8`, `us-ascii`, or `binary`. + +""" +register = registry.register + + +""" +.. function:: unregister(name): + Unregister registered encoder/decoder. + + :param name: Registered serialization method name. + +""" +unregister = registry.unregister + + +def raw_encode(data): + """Special case serializer.""" + content_type = 'application/data' + payload = data + if isinstance(payload, text_t): + content_encoding = 'utf-8' + with _reraise_errors(EncodeError, exclude=()): + payload = payload.encode(content_encoding) + else: + content_encoding = 'binary' + return content_type, content_encoding, payload + + +def register_json(): + """Register an encoder/decoder for JSON serialization.""" + from anyjson import loads as json_loads, dumps as json_dumps + + def _loads(obj): + if isinstance(obj, bytes_t): + obj = obj.decode() + return json_loads(obj) + + registry.register('json', json_dumps, _loads, + content_type='application/json', + content_encoding='utf-8') + + +def register_yaml(): + """Register an encoder/decoder for YAML serialization. + + It is slower than JSON, but allows for more data types + to be serialized. Useful if you need to send data such as dates.""" + try: + import yaml + registry.register('yaml', yaml.safe_dump, yaml.safe_load, + content_type='application/x-yaml', + content_encoding='utf-8') + except ImportError: + + def not_available(*args, **kwargs): + """In case a client receives a yaml message, but yaml + isn't installed.""" + raise SerializerNotInstalled( + 'No decoder installed for YAML. 
Install the PyYAML library') + registry.register('yaml', None, not_available, 'application/x-yaml') + + +if sys.version_info[0] == 3: # pragma: no cover + + def unpickle(s): + return pickle_loads(str_to_bytes(s)) + +else: + unpickle = pickle_loads # noqa + + +def register_pickle(): + """The fastest serialization method, but restricts + you to python clients.""" + + def pickle_dumps(obj, dumper=pickle.dumps): + return dumper(obj, protocol=pickle_protocol) + + registry.register('pickle', pickle_dumps, unpickle, + content_type='application/x-python-serialize', + content_encoding='binary') + + +def register_msgpack(): + """See http://msgpack.sourceforge.net/""" + try: + try: + from msgpack import packb as pack, unpackb + unpack = lambda s: unpackb(s, encoding='utf-8') + except ImportError: + # msgpack < 0.2.0 and Python 2.5 + from msgpack import packs as pack, unpacks as unpack # noqa + registry.register( + 'msgpack', pack, unpack, + content_type='application/x-msgpack', + content_encoding='binary') + except (ImportError, ValueError): + + def not_available(*args, **kwargs): + """In case a client receives a msgpack message, but msgpack + isn't installed.""" + raise SerializerNotInstalled( + 'No decoder installed for msgpack. ' + 'Please install the msgpack library') + registry.register('msgpack', None, not_available, + 'application/x-msgpack') + +# Register the base serialization methods. +register_json() +register_pickle() +register_yaml() +register_msgpack() + +# Default serializer is 'json' +registry._set_default_serializer('json') + + +_setupfuns = { + 'json': register_json, + 'pickle': register_pickle, + 'yaml': register_yaml, + 'msgpack': register_msgpack, + 'application/json': register_json, + 'application/x-yaml': register_yaml, + 'application/x-python-serialize': register_pickle, + 'application/x-msgpack': register_msgpack, +} + + +def enable_insecure_serializers(choices=['pickle', 'yaml', 'msgpack']): + """Enable serializers that are considered to be unsafe. + + Will enable ``pickle``, ``yaml`` and ``msgpack`` by default, + but you can also specify a list of serializers (by name or content type) + to enable. + + """ + for choice in choices: + try: + registry.enable(choice) + except KeyError: + pass + + +def disable_insecure_serializers(allowed=['json']): + """Disable untrusted serializers. + + Will disable all serializers except ``json`` + or you can specify a list of deserializers to allow. + + .. note:: + + Producers will still be able to serialize data + in these formats, but consumers will not accept + incoming data using the untrusted content types. + + """ + for name in registry._decoders: + registry.disable(name) + if allowed is not None: + for name in allowed: + registry.enable(name) + + +# Insecure serializers are disabled by default since v3.0 +disable_insecure_serializers() + +# Load entrypoints from installed extensions +for ep, args in entrypoints('kombu.serializers'): # pragma: no cover + register(ep.name, *args) + + +def prepare_accept_content(l, name_to_type=registry.name_to_type): + if l is not None: + return set(n if '/' in n else name_to_type[n] for n in l) + return l diff --git a/kombu/simple.py b/kombu/simple.py new file mode 100644 index 0000000..1b1d8e7 --- /dev/null +++ b/kombu/simple.py @@ -0,0 +1,137 @@ +""" +kombu.simple +============ + +Simple interface. + +""" +from __future__ import absolute_import + +import socket + +from collections import deque + +from . import entity +from . 
+
+
+class SimpleBase(object):
+    Empty = Empty
+    _consuming = False
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *exc_info):
+        self.close()
+
+    def __init__(self, channel, producer, consumer, no_ack=False):
+        self.channel = maybe_channel(channel)
+        self.producer = producer
+        self.consumer = consumer
+        self.no_ack = no_ack
+        self.queue = self.consumer.queues[0]
+        self.buffer = deque()
+        self.consumer.register_callback(self._receive)
+
+    def get(self, block=True, timeout=None):
+        if not block:
+            return self.get_nowait()
+        self._consume()
+        elapsed = 0.0
+        remaining = timeout
+        while True:
+            time_start = monotonic()
+            if self.buffer:
+                return self.buffer.pop()
+            try:
+                self.channel.connection.client.drain_events(
+                    timeout=timeout and remaining)
+            except socket.timeout:
+                raise self.Empty()
+            elapsed += monotonic() - time_start
+            remaining = timeout and timeout - elapsed or None
+
+    def get_nowait(self):
+        m = self.queue.get(no_ack=self.no_ack)
+        if not m:
+            raise self.Empty()
+        return m
+
+    def put(self, message, serializer=None, headers=None, compression=None,
+            routing_key=None, **kwargs):
+        self.producer.publish(message,
+                              serializer=serializer,
+                              routing_key=routing_key,
+                              headers=headers,
+                              compression=compression,
+                              **kwargs)
+
+    def clear(self):
+        return self.consumer.purge()
+
+    def qsize(self):
+        _, size, _ = self.queue.queue_declare(passive=True)
+        return size
+
+    def close(self):
+        self.consumer.cancel()
+
+    def _receive(self, message_data, message):
+        self.buffer.append(message)
+
+    def _consume(self):
+        if not self._consuming:
+            self.consumer.consume(no_ack=self.no_ack)
+            self._consuming = True
+
+    def __len__(self):
+        """`len(self) -> self.qsize()`"""
+        return self.qsize()
+
+    def __bool__(self):
+        return True
+    __nonzero__ = __bool__
+
+
+class SimpleQueue(SimpleBase):
+    no_ack = False
+    queue_opts = {}
+    exchange_opts = {'type': 'direct'}
+
+    def __init__(self, channel, name, no_ack=None, queue_opts=None,
+                 exchange_opts=None, serializer=None,
+                 compression=None, **kwargs):
+        queue = name
+        queue_opts = dict(self.queue_opts, **queue_opts or {})
+        exchange_opts = dict(self.exchange_opts, **exchange_opts or {})
+        if no_ack is None:
+            no_ack = self.no_ack
+        if not isinstance(queue, entity.Queue):
+            exchange = entity.Exchange(name, **exchange_opts)
+            queue = entity.Queue(name, exchange, name, **queue_opts)
+            routing_key = name
+        else:
+            name = queue.name
+            exchange = queue.exchange
+            routing_key = queue.routing_key
+        producer = messaging.Producer(channel, exchange,
+                                      serializer=serializer,
+                                      routing_key=routing_key,
+                                      compression=compression)
+        consumer = messaging.Consumer(channel, queue)
+        super(SimpleQueue, self).__init__(channel, producer,
+                                          consumer, no_ack, **kwargs)
+
+
+class SimpleBuffer(SimpleQueue):
+    no_ack = True
+    queue_opts = dict(durable=False,
+                      auto_delete=True)
+    exchange_opts = dict(durable=False,
+                         delivery_mode='transient',
+                         auto_delete=True)
diff --git a/kombu/syn.py b/kombu/syn.py
new file mode 100644
index 0000000..01b4d47
--- /dev/null
+++ b/kombu/syn.py
@@ -0,0 +1,53 @@
+"""
+kombu.syn
+=========
+
+"""
+from __future__ import absolute_import
+
+import sys
+
+__all__ = ['detect_environment']
+
+_environment = None
+
+
+def blocking(fun, *args, **kwargs):
+    return fun(*args, **kwargs)
+
+
+def select_blocking_method(type):
+    pass
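+
+
+# Illustrative use of the public helper defined below (a sketch; the
+# result depends on whether an event-loop library has been imported):
+#
+#     >>> from kombu.syn import detect_environment
+#     >>> detect_environment()
+#     'default'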
+
+
+def _detect_environment():
+    # ## -eventlet-
+    if 'eventlet' in sys.modules:
+        try:
+            from eventlet.patcher import is_monkey_patched as is_eventlet
+            import socket
+
+            if is_eventlet(socket):
+                return 'eventlet'
+        except ImportError:
+            pass
+
+    # ## -gevent-
+    if 'gevent' in sys.modules:
+        try:
+            from gevent import socket as _gsocket
+            import socket
+
+            if socket.socket is _gsocket.socket:
+                return 'gevent'
+        except ImportError:
+            pass
+
+    return 'default'
+
+
+def detect_environment():
+    global _environment
+    if _environment is None:
+        _environment = _detect_environment()
+    return _environment
diff --git a/kombu/tests/__init__.py b/kombu/tests/__init__.py
new file mode 100644
index 0000000..fb9f21a
--- /dev/null
+++ b/kombu/tests/__init__.py
@@ -0,0 +1,91 @@
+from __future__ import absolute_import
+
+import anyjson
+import atexit
+import os
+import sys
+
+from kombu.exceptions import VersionMismatch
+
+# avoid json implementation inconsistencies.
+try:
+    import json  # noqa
+    anyjson.force_implementation('json')
+except ImportError:
+    anyjson.force_implementation('simplejson')
+
+
+def teardown():
+    # Workaround for multiprocessing bug where logging
+    # is attempted after globals have already been collected
+    # at shutdown.
+    cancelled = set()
+    try:
+        import multiprocessing.util
+        cancelled.add(multiprocessing.util._exit_function)
+    except (AttributeError, ImportError):
+        pass
+
+    try:
+        atexit._exithandlers[:] = [
+            e for e in atexit._exithandlers if e[0] not in cancelled
+        ]
+    except AttributeError:  # pragma: no cover
+        pass  # Py3 missing _exithandlers
+
+
+def find_distribution_modules(name=__name__, file=__file__):
+    current_dist_depth = len(name.split('.')) - 1
+    current_dist = os.path.join(os.path.dirname(file),
+                                *([os.pardir] * current_dist_depth))
+    abs = os.path.abspath(current_dist)
+    dist_name = os.path.basename(abs)
+
+    for dirpath, dirnames, filenames in os.walk(abs):
+        package = (dist_name + dirpath[len(abs):]).replace('/', '.')
+        if '__init__.py' in filenames:
+            yield package
+            for filename in filenames:
+                if filename.endswith('.py') and filename != '__init__.py':
+                    yield '.'.join([package, filename])[:-3]
+
+
+def import_all_modules(name=__name__, file=__file__, skip=[]):
+    for module in find_distribution_modules(name, file):
+        if module not in skip:
+            print('preimporting %r for coverage...' % (module, ))
+            try:
+                __import__(module)
+            except (ImportError, VersionMismatch, AttributeError):
+                pass
+
+
+def is_in_coverage():
+    return (os.environ.get('COVER_ALL_MODULES') or
+            '--with-coverage3' in sys.argv)
+
+
+def setup_django_env():
+    try:
+        from django.conf import settings
+    except ImportError:
+        return
+
+    if not settings.configured:
+        settings.configure(
+            DATABASES={
+                'default': {
+                    'ENGINE': 'django.db.backends.sqlite3',
+                    'NAME': ':memory:',
+                },
+            },
+            DATABASE_ENGINE='sqlite3',
+            DATABASE_NAME=':memory:',
+            INSTALLED_APPS=('kombu.transport.django', ),
+        )
+
+
+def setup():
+    # so coverage sees all our modules.
+    setup_django_env()
+    if is_in_coverage():
+        import_all_modules()
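+
+
+# setup()/teardown() above are picked up as package-level fixtures by
+# the test runner.  Forcing the coverage preimport path by hand looks
+# like this (an illustrative sketch only):
+#
+#     >>> import os
+#     >>> from kombu.tests import is_in_coverage
+#     >>> os.environ['COVER_ALL_MODULES'] = '1'
+#     >>> bool(is_in_coverage())
+#     True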
diff --git a/kombu/tests/async/__init__.py b/kombu/tests/async/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/kombu/tests/async/test_hub.py b/kombu/tests/async/test_hub.py
new file mode 100644
index 0000000..7d5d81c
--- /dev/null
+++ b/kombu/tests/async/test_hub.py
@@ -0,0 +1,33 @@
+from __future__ import absolute_import
+
+from kombu.async import hub as _hub
+from kombu.async.hub import Hub, get_event_loop, set_event_loop
+
+from kombu.tests.case import Case
+
+
+class test_Utils(Case):
+
+    def setUp(self):
+        self._prev_loop = get_event_loop()
+
+    def tearDown(self):
+        set_event_loop(self._prev_loop)
+
+    def test_get_set_event_loop(self):
+        set_event_loop(None)
+        self.assertIsNone(_hub._current_loop)
+        self.assertIsNone(get_event_loop())
+        hub = Hub()
+        set_event_loop(hub)
+        self.assertIs(_hub._current_loop, hub)
+        self.assertIs(get_event_loop(), hub)
+
+
+class test_Hub(Case):
+
+    def setUp(self):
+        self.hub = Hub()
+
+    def tearDown(self):
+        self.hub.close()
diff --git a/kombu/tests/async/test_semaphore.py b/kombu/tests/async/test_semaphore.py
new file mode 100644
index 0000000..5ca48de
--- /dev/null
+++ b/kombu/tests/async/test_semaphore.py
@@ -0,0 +1,45 @@
+from __future__ import absolute_import
+
+from kombu.async.semaphore import LaxBoundedSemaphore
+
+from kombu.tests.case import Case
+
+
+class test_LaxBoundedSemaphore(Case):
+
+    def test_over_release(self):
+        x = LaxBoundedSemaphore(2)
+        calls = []
+        for i in range(1, 21):
+            x.acquire(calls.append, i)
+        x.release()
+        x.acquire(calls.append, 'x')
+        x.release()
+        x.acquire(calls.append, 'y')
+
+        self.assertEqual(calls, [1, 2, 3, 4])
+
+        for i in range(30):
+            x.release()
+        self.assertEqual(calls, list(range(1, 21)) + ['x', 'y'])
+        self.assertEqual(x.value, x.initial_value)
+
+        calls[:] = []
+        for i in range(1, 11):
+            x.acquire(calls.append, i)
+        for i in range(1, 11):
+            x.release()
+        self.assertEqual(calls, list(range(1, 11)))
+
+        calls[:] = []
+        self.assertEqual(x.value, x.initial_value)
+        x.acquire(calls.append, 'x')
+        self.assertEqual(x.value, 1)
+        x.acquire(calls.append, 'y')
+        self.assertEqual(x.value, 0)
+        x.release()
+        self.assertEqual(x.value, 1)
+        x.release()
+        self.assertEqual(x.value, 2)
+        x.release()
+        self.assertEqual(x.value, 2)
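+
+
+# In short, the semantics exercised above: acquire(callback, *args)
+# invokes the callback immediately while capacity remains, and queues
+# it otherwise; release() wakes one queued callback if any, otherwise
+# restores capacity, never exceeding the initial value.  A sketch
+# (Python 3 assumed, for the print function):
+#
+#     >>> from kombu.async.semaphore import LaxBoundedSemaphore
+#     >>> sem = LaxBoundedSemaphore(1)
+#     >>> sem.acquire(print, 'first')   # capacity available: runs now
+#     first
+#     True
+#     >>> sem.acquire(print, 'second')  # queued until release()
+#     False
+#     >>> sem.release()                 # 'second' is printed here
+#     second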
diff --git a/kombu/tests/case.py b/kombu/tests/case.py
new file mode 100644
index 0000000..e8b6d32
--- /dev/null
+++ b/kombu/tests/case.py
@@ -0,0 +1,191 @@
+from __future__ import absolute_import
+
+import os
+import sys
+import types
+
+from functools import wraps
+
+import mock
+
+from nose import SkipTest
+
+from kombu.five import builtins, string_t, StringIO
+from kombu.utils.encoding import ensure_bytes
+
+try:
+    import unittest
+    unittest.skip
+except AttributeError:
+    import unittest2 as unittest  # noqa
+
+PY3 = sys.version_info[0] == 3
+
+patch = mock.patch
+call = mock.call
+
+
+class Case(unittest.TestCase):
+
+    def assertItemsEqual(self, a, b, *args, **kwargs):
+        return self.assertEqual(sorted(a), sorted(b), *args, **kwargs)
+    assertSameElements = assertItemsEqual
+
+
+class Mock(mock.Mock):
+
+    def __init__(self, *args, **kwargs):
+        attrs = kwargs.pop('attrs', None) or {}
+        super(Mock, self).__init__(*args, **kwargs)
+        for attr_name, attr_value in attrs.items():
+            setattr(self, attr_name, attr_value)
+
+
+class _ContextMock(Mock):
+    """Dummy class implementing __enter__ and __exit__
+    as the with statement requires these to be implemented
+    in the class, not just the instance."""
+
+    def __enter__(self):
+        pass
+
+    def __exit__(self, *exc_info):
+        pass
+
+
+def ContextMock(*args, **kwargs):
+    obj = _ContextMock(*args, **kwargs)
+    obj.attach_mock(Mock(), '__enter__')
+    obj.attach_mock(Mock(), '__exit__')
+    obj.__enter__.return_value = obj
+    # if __exit__ returns a value the exception is ignored,
+    # so it must return None here.
+    obj.__exit__.return_value = None
+    return obj
+
+
+class MockPool(object):
+
+    def __init__(self, value=None):
+        self.value = value or ContextMock()
+
+    def acquire(self, **kwargs):
+        return self.value
+
+
+def redirect_stdouts(fun):
+
+    @wraps(fun)
+    def _inner(*args, **kwargs):
+        sys.stdout = StringIO()
+        sys.stderr = StringIO()
+        try:
+            return fun(*args, **dict(kwargs,
+                                     stdout=sys.stdout, stderr=sys.stderr))
+        finally:
+            sys.stdout = sys.__stdout__
+            sys.stderr = sys.__stderr__
+
+    return _inner
+
+
+def module_exists(*modules):
+
+    def _inner(fun):
+
+        @wraps(fun)
+        def __inner(*args, **kwargs):
+            gen = []
+            for module in modules:
+                if isinstance(module, string_t):
+                    if not PY3:
+                        module = ensure_bytes(module)
+                    module = types.ModuleType(module)
+                gen.append(module)
+                sys.modules[module.__name__] = module
+                name = module.__name__
+                if '.' in name:
+                    parent, _, attr = name.rpartition('.')
+                    setattr(sys.modules[parent], attr, module)
+            try:
+                return fun(*args, **kwargs)
+            finally:
+                for module in gen:
+                    sys.modules.pop(module.__name__, None)
+
+        return __inner
+    return _inner
+
+
+# Taken from
+# http://bitbucket.org/runeh/snippets/src/tip/missing_modules.py
+def mask_modules(*modnames):
+    def _inner(fun):
+
+        @wraps(fun)
+        def __inner(*args, **kwargs):
+            realimport = builtins.__import__
+
+            def myimp(name, *args, **kwargs):
+                if name in modnames:
+                    raise ImportError('No module named %s' % name)
+                else:
+                    return realimport(name, *args, **kwargs)
+
+            builtins.__import__ = myimp
+            try:
+                return fun(*args, **kwargs)
+            finally:
+                builtins.__import__ = realimport
+
+        return __inner
+    return _inner
+
+
+def skip_if_environ(env_var_name):
+
+    def _wrap_test(fun):
+
+        @wraps(fun)
+        def _skips_if_environ(*args, **kwargs):
+            if os.environ.get(env_var_name):
+                raise SkipTest('SKIP %s: %s set\n' % (
+                    fun.__name__, env_var_name))
+            return fun(*args, **kwargs)
+
+        return _skips_if_environ
+
+    return _wrap_test
+
+
+def skip_if_module(module):
+    def _wrap_test(fun):
+        @wraps(fun)
+        def _skip_if_module(*args, **kwargs):
+            try:
+                __import__(module)
+                raise SkipTest('SKIP %s: %s available\n' % (
+                    fun.__name__, module))
+            except ImportError:
+                pass
+            return fun(*args, **kwargs)
+        return _skip_if_module
+    return _wrap_test
+
+
+def skip_if_not_module(module, import_errors=(ImportError, )):
+    def _wrap_test(fun):
+        @wraps(fun)
+        def _skip_if_not_module(*args, **kwargs):
+            try:
+                __import__(module)
+            except import_errors:
+                raise SkipTest('SKIP %s: %s not available\n' % (
+                    fun.__name__, module))
+            return fun(*args, **kwargs)
+        return _skip_if_not_module
+    return _wrap_test
+
+
+def skip_if_quick(fun):
+    return skip_if_environ('QUICKTEST')(fun)
diff --git a/kombu/tests/mocks.py b/kombu/tests/mocks.py
new file mode 100644
index 0000000..836457e
--- /dev/null
+++ b/kombu/tests/mocks.py
@@ -0,0 +1,148 @@
+from __future__ import absolute_import
+
+from itertools import count
+
+import anyjson
+
+from kombu.transport import base
+
+
+class Message(base.Message):
+
+    def __init__(self, *args, **kwargs):
+        self.throw_decode_error = kwargs.get('throw_decode_error', False)
+        super(Message, self).__init__(*args, **kwargs)
+
+    def decode(self):
+        if self.throw_decode_error:
raise ValueError("can't decode message") + return super(Message, self).decode() + + +class Channel(base.StdChannel): + open = True + throw_decode_error = False + _ids = count(1) + + def __init__(self, connection): + self.connection = connection + self.called = [] + self.deliveries = count(1) + self.to_deliver = [] + self.events = {'basic_return': set()} + self.channel_id = next(self._ids) + + def _called(self, name): + self.called.append(name) + + def __contains__(self, key): + return key in self.called + + def exchange_declare(self, *args, **kwargs): + self._called('exchange_declare') + + def prepare_message(self, body, priority=0, content_type=None, + content_encoding=None, headers=None, properties={}): + self._called('prepare_message') + return dict(body=body, + headers=headers, + properties=properties, + priority=priority, + content_type=content_type, + content_encoding=content_encoding) + + def basic_publish(self, message, exchange='', routing_key='', + mandatory=False, immediate=False, **kwargs): + self._called('basic_publish') + return message, exchange, routing_key + + def exchange_delete(self, *args, **kwargs): + self._called('exchange_delete') + + def queue_declare(self, *args, **kwargs): + self._called('queue_declare') + + def queue_bind(self, *args, **kwargs): + self._called('queue_bind') + + def queue_unbind(self, *args, **kwargs): + self._called('queue_unbind') + + def queue_delete(self, queue, if_unused=False, if_empty=False, **kwargs): + self._called('queue_delete') + + def basic_get(self, *args, **kwargs): + self._called('basic_get') + try: + return self.to_deliver.pop() + except IndexError: + pass + + def queue_purge(self, *args, **kwargs): + self._called('queue_purge') + + def basic_consume(self, *args, **kwargs): + self._called('basic_consume') + + def basic_cancel(self, *args, **kwargs): + self._called('basic_cancel') + + def basic_ack(self, *args, **kwargs): + self._called('basic_ack') + + def basic_recover(self, requeue=False): + self._called('basic_recover') + + def exchange_bind(self, *args, **kwargs): + self._called('exchange_bind') + + def exchange_unbind(self, *args, **kwargs): + self._called('exchange_unbind') + + def close(self): + self._called('close') + + def message_to_python(self, message, *args, **kwargs): + self._called('message_to_python') + return Message(self, body=anyjson.dumps(message), + delivery_tag=next(self.deliveries), + throw_decode_error=self.throw_decode_error, + content_type='application/json', + content_encoding='utf-8') + + def flow(self, active): + self._called('flow') + + def basic_reject(self, delivery_tag, requeue=False): + if requeue: + return self._called('basic_reject:requeue') + return self._called('basic_reject') + + def basic_qos(self, prefetch_size=0, prefetch_count=0, + apply_global=False): + self._called('basic_qos') + + +class Connection(object): + connected = True + + def __init__(self, client): + self.client = client + + def channel(self): + return Channel(self) + + +class Transport(base.Transport): + + def establish_connection(self): + return Connection(self.client) + + def create_channel(self, connection): + return connection.channel() + + def drain_events(self, connection, **kwargs): + return 'event' + + def close_connection(self, connection): + connection.connected = False diff --git a/kombu/tests/test_clocks.py b/kombu/tests/test_clocks.py new file mode 100644 index 0000000..fa39b6d --- /dev/null +++ b/kombu/tests/test_clocks.py @@ -0,0 +1,104 @@ +from __future__ import absolute_import + +import pickle + +from heapq 
import heappush +from time import time + +from kombu.clocks import LamportClock, timetuple + +from .case import Mock, Case + + +class test_LamportClock(Case): + + def test_clocks(self): + c1 = LamportClock() + c2 = LamportClock() + + c1.forward() + c2.forward() + c1.forward() + c1.forward() + c2.adjust(c1.value) + self.assertEqual(c2.value, c1.value + 1) + self.assertTrue(repr(c1)) + + c2_val = c2.value + c2.forward() + c2.forward() + c2.adjust(c1.value) + self.assertEqual(c2.value, c2_val + 2 + 1) + + c1.adjust(c2.value) + self.assertEqual(c1.value, c2.value + 1) + + def test_sort(self): + c = LamportClock() + pid1 = 'a.example.com:312' + pid2 = 'b.example.com:311' + + events = [] + + m1 = (c.forward(), pid1) + heappush(events, m1) + m2 = (c.forward(), pid2) + heappush(events, m2) + m3 = (c.forward(), pid1) + heappush(events, m3) + m4 = (30, pid1) + heappush(events, m4) + m5 = (30, pid2) + heappush(events, m5) + + self.assertEqual(str(c), str(c.value)) + + self.assertEqual(c.sort_heap(events), m1) + self.assertEqual(c.sort_heap([m4, m5]), m4) + self.assertEqual(c.sort_heap([m4, m5, m1]), m4) + + +class test_timetuple(Case): + + def test_repr(self): + x = timetuple(133, time(), 'id', Mock()) + self.assertTrue(repr(x)) + + def test_pickleable(self): + x = timetuple(133, time(), 'id', 'obj') + self.assertEqual(pickle.loads(pickle.dumps(x)), tuple(x)) + + def test_order(self): + t1 = time() + t2 = time() + 300 # windows clock not reliable + a = timetuple(133, t1, 'A', 'obj') + b = timetuple(140, t1, 'A', 'obj') + self.assertTrue(a.__getnewargs__()) + self.assertEqual(a.clock, 133) + self.assertEqual(a.timestamp, t1) + self.assertEqual(a.id, 'A') + self.assertEqual(a.obj, 'obj') + self.assertTrue( + a <= b, + ) + self.assertTrue( + b >= a, + ) + + self.assertEqual( + timetuple(134, time(), 'A', 'obj').__lt__(tuple()), + NotImplemented, + ) + self.assertGreater( + timetuple(134, t2, 'A', 'obj'), + timetuple(133, t1, 'A', 'obj'), + ) + self.assertGreater( + timetuple(134, t1, 'B', 'obj'), + timetuple(134, t1, 'A', 'obj'), + ) + + self.assertGreater( + timetuple(None, t2, 'B', 'obj'), + timetuple(None, t1, 'A', 'obj'), + ) diff --git a/kombu/tests/test_common.py b/kombu/tests/test_common.py new file mode 100644 index 0000000..c4eebb7 --- /dev/null +++ b/kombu/tests/test_common.py @@ -0,0 +1,416 @@ +from __future__ import absolute_import + +import socket + +from amqp import RecoverableConnectionError + +from kombu import common +from kombu.common import ( + Broadcast, maybe_declare, + send_reply, collect_replies, + declaration_cached, ignore_errors, + QoS, PREFETCH_COUNT_MAX, +) + +from .case import Case, ContextMock, Mock, MockPool, patch + + +class test_ignore_errors(Case): + + def test_ignored(self): + connection = Mock() + connection.channel_errors = (KeyError, ) + connection.connection_errors = (KeyError, ) + + with ignore_errors(connection): + raise KeyError() + + def raising(): + raise KeyError() + + ignore_errors(connection, raising) + + connection.channel_errors = connection.connection_errors = \ + () + + with self.assertRaises(KeyError): + with ignore_errors(connection): + raise KeyError() + + +class test_declaration_cached(Case): + + def test_when_cached(self): + chan = Mock() + chan.connection.client.declared_entities = ['foo'] + self.assertTrue(declaration_cached('foo', chan)) + + def test_when_not_cached(self): + chan = Mock() + chan.connection.client.declared_entities = ['bar'] + self.assertFalse(declaration_cached('foo', chan)) + + +class test_Broadcast(Case): + + def 
test_arguments(self): + q = Broadcast(name='test_Broadcast') + self.assertTrue(q.name.startswith('bcast.')) + self.assertEqual(q.alias, 'test_Broadcast') + self.assertTrue(q.auto_delete) + self.assertEqual(q.exchange.name, 'test_Broadcast') + self.assertEqual(q.exchange.type, 'fanout') + + q = Broadcast('test_Broadcast', 'explicit_queue_name') + self.assertEqual(q.name, 'explicit_queue_name') + self.assertEqual(q.exchange.name, 'test_Broadcast') + + +class test_maybe_declare(Case): + + def test_cacheable(self): + channel = Mock() + client = channel.connection.client = Mock() + client.declared_entities = set() + entity = Mock() + entity.can_cache_declaration = True + entity.auto_delete = False + entity.is_bound = True + entity.channel = channel + + maybe_declare(entity, channel) + self.assertEqual(entity.declare.call_count, 1) + self.assertIn( + hash(entity), channel.connection.client.declared_entities, + ) + + maybe_declare(entity, channel) + self.assertEqual(entity.declare.call_count, 1) + + entity.channel.connection = None + with self.assertRaises(RecoverableConnectionError): + maybe_declare(entity) + + def test_binds_entities(self): + channel = Mock() + channel.connection.client.declared_entities = set() + entity = Mock() + entity.can_cache_declaration = True + entity.is_bound = False + entity.bind.return_value = entity + entity.bind.return_value.channel = channel + + maybe_declare(entity, channel) + entity.bind.assert_called_with(channel) + + def test_with_retry(self): + channel = Mock() + client = channel.connection.client = Mock() + client.declared_entities = set() + entity = Mock() + entity.can_cache_declaration = True + entity.is_bound = True + entity.channel = channel + + maybe_declare(entity, channel, retry=True) + self.assertTrue(channel.connection.client.ensure.call_count) + + +class test_replies(Case): + + def test_send_reply(self): + req = Mock() + req.content_type = 'application/json' + req.content_encoding = 'binary' + req.properties = {'reply_to': 'hello', + 'correlation_id': 'world'} + channel = Mock() + exchange = Mock() + exchange.is_bound = True + exchange.channel = channel + producer = Mock() + producer.channel = channel + producer.channel.connection.client.declared_entities = set() + send_reply(exchange, req, {'hello': 'world'}, producer) + + self.assertTrue(producer.publish.call_count) + args = producer.publish.call_args + self.assertDictEqual(args[0][0], {'hello': 'world'}) + self.assertDictEqual(args[1], {'exchange': exchange, + 'routing_key': 'hello', + 'correlation_id': 'world', + 'serializer': 'json', + 'retry': False, + 'retry_policy': None, + 'content_encoding': 'binary'}) + + @patch('kombu.common.itermessages') + def test_collect_replies_with_ack(self, itermessages): + conn, channel, queue = Mock(), Mock(), Mock() + body, message = Mock(), Mock() + itermessages.return_value = [(body, message)] + it = collect_replies(conn, channel, queue, no_ack=False) + m = next(it) + self.assertIs(m, body) + itermessages.assert_called_with(conn, channel, queue, no_ack=False) + message.ack.assert_called_with() + + with self.assertRaises(StopIteration): + next(it) + + channel.after_reply_message_received.assert_called_with(queue.name) + + @patch('kombu.common.itermessages') + def test_collect_replies_no_ack(self, itermessages): + conn, channel, queue = Mock(), Mock(), Mock() + body, message = Mock(), Mock() + itermessages.return_value = [(body, message)] + it = collect_replies(conn, channel, queue) + m = next(it) + self.assertIs(m, body) + 
itermessages.assert_called_with(conn, channel, queue, no_ack=True) + self.assertFalse(message.ack.called) + + @patch('kombu.common.itermessages') + def test_collect_replies_no_replies(self, itermessages): + conn, channel, queue = Mock(), Mock(), Mock() + itermessages.return_value = [] + it = collect_replies(conn, channel, queue) + with self.assertRaises(StopIteration): + next(it) + + self.assertFalse(channel.after_reply_message_received.called) + + +class test_insured(Case): + + @patch('kombu.common.logger') + def test_ensure_errback(self, logger): + common._ensure_errback('foo', 30) + self.assertTrue(logger.error.called) + + def test_revive_connection(self): + on_revive = Mock() + channel = Mock() + common.revive_connection(Mock(), channel, on_revive) + on_revive.assert_called_with(channel) + + common.revive_connection(Mock(), channel, None) + + def get_insured_mocks(self, insured_returns=('works', 'ignored')): + conn = ContextMock() + pool = MockPool(conn) + fun = Mock() + insured = conn.autoretry.return_value = Mock() + insured.return_value = insured_returns + return conn, pool, fun, insured + + def test_insured(self): + conn, pool, fun, insured = self.get_insured_mocks() + + ret = common.insured(pool, fun, (2, 2), {'foo': 'bar'}) + self.assertEqual(ret, 'works') + conn.ensure_connection.assert_called_with( + errback=common._ensure_errback, + ) + + self.assertTrue(insured.called) + i_args, i_kwargs = insured.call_args + self.assertTupleEqual(i_args, (2, 2)) + self.assertDictEqual(i_kwargs, {'foo': 'bar', + 'connection': conn}) + + self.assertTrue(conn.autoretry.called) + ar_args, ar_kwargs = conn.autoretry.call_args + self.assertTupleEqual(ar_args, (fun, conn.default_channel)) + self.assertTrue(ar_kwargs.get('on_revive')) + self.assertTrue(ar_kwargs.get('errback')) + + def test_insured_custom_errback(self): + conn, pool, fun, insured = self.get_insured_mocks() + + custom_errback = Mock() + common.insured(pool, fun, (2, 2), {'foo': 'bar'}, + errback=custom_errback) + conn.ensure_connection.assert_called_with(errback=custom_errback) + + +class MockConsumer(object): + consumers = set() + + def __init__(self, channel, queues=None, callbacks=None, **kwargs): + self.channel = channel + self.queues = queues + self.callbacks = callbacks + + def __enter__(self): + self.consumers.add(self) + return self + + def __exit__(self, *exc_info): + self.consumers.discard(self) + + +class test_itermessages(Case): + + class MockConnection(object): + should_raise_timeout = False + + def drain_events(self, **kwargs): + if self.should_raise_timeout: + raise socket.timeout() + for consumer in MockConsumer.consumers: + for callback in consumer.callbacks: + callback('body', 'message') + + def test_default(self): + conn = self.MockConnection() + channel = Mock() + channel.connection.client = conn + conn.Consumer = MockConsumer + it = common.itermessages(conn, channel, 'q', limit=1) + + ret = next(it) + self.assertTupleEqual(ret, ('body', 'message')) + + with self.assertRaises(StopIteration): + next(it) + + def test_when_raises_socket_timeout(self): + conn = self.MockConnection() + conn.should_raise_timeout = True + channel = Mock() + channel.connection.client = conn + conn.Consumer = MockConsumer + it = common.itermessages(conn, channel, 'q', limit=1) + + with self.assertRaises(StopIteration): + next(it) + + @patch('kombu.common.deque') + def test_when_raises_IndexError(self, deque): + deque_instance = deque.return_value = Mock() + deque_instance.popleft.side_effect = IndexError() + conn = self.MockConnection() + 
channel = Mock() + conn.Consumer = MockConsumer + it = common.itermessages(conn, channel, 'q', limit=1) + + with self.assertRaises(StopIteration): + next(it) + + +class test_QoS(Case): + + class _QoS(QoS): + def __init__(self, value): + self.value = value + QoS.__init__(self, None, value) + + def set(self, value): + return value + + def test_qos_exceeds_16bit(self): + with patch('kombu.common.logger') as logger: + callback = Mock() + qos = QoS(callback, 10) + qos.prev = 100 + # cannot use 2 ** 32 because of a bug on OSX Py2.5: + # https://jira.mongodb.org/browse/PYTHON-389 + qos.set(4294967296) + self.assertTrue(logger.warn.called) + callback.assert_called_with(prefetch_count=0) + + def test_qos_increment_decrement(self): + qos = self._QoS(10) + self.assertEqual(qos.increment_eventually(), 11) + self.assertEqual(qos.increment_eventually(3), 14) + self.assertEqual(qos.increment_eventually(-30), 14) + self.assertEqual(qos.decrement_eventually(7), 7) + self.assertEqual(qos.decrement_eventually(), 6) + + def test_qos_disabled_increment_decrement(self): + qos = self._QoS(0) + self.assertEqual(qos.increment_eventually(), 0) + self.assertEqual(qos.increment_eventually(3), 0) + self.assertEqual(qos.increment_eventually(-30), 0) + self.assertEqual(qos.decrement_eventually(7), 0) + self.assertEqual(qos.decrement_eventually(), 0) + self.assertEqual(qos.decrement_eventually(10), 0) + + def test_qos_thread_safe(self): + qos = self._QoS(10) + + def add(): + for i in range(1000): + qos.increment_eventually() + + def sub(): + for i in range(1000): + qos.decrement_eventually() + + def threaded(funs): + from threading import Thread + threads = [Thread(target=fun) for fun in funs] + for thread in threads: + thread.start() + for thread in threads: + thread.join() + + threaded([add, add]) + self.assertEqual(qos.value, 2010) + + qos.value = 1000 + threaded([add, sub]) # n = 2 + self.assertEqual(qos.value, 1000) + + def test_exceeds_short(self): + qos = QoS(Mock(), PREFETCH_COUNT_MAX - 1) + qos.update() + self.assertEqual(qos.value, PREFETCH_COUNT_MAX - 1) + qos.increment_eventually() + self.assertEqual(qos.value, PREFETCH_COUNT_MAX) + qos.increment_eventually() + self.assertEqual(qos.value, PREFETCH_COUNT_MAX + 1) + qos.decrement_eventually() + self.assertEqual(qos.value, PREFETCH_COUNT_MAX) + qos.decrement_eventually() + self.assertEqual(qos.value, PREFETCH_COUNT_MAX - 1) + + def test_consumer_increment_decrement(self): + mconsumer = Mock() + qos = QoS(mconsumer.qos, 10) + qos.update() + self.assertEqual(qos.value, 10) + mconsumer.qos.assert_called_with(prefetch_count=10) + qos.decrement_eventually() + qos.update() + self.assertEqual(qos.value, 9) + mconsumer.qos.assert_called_with(prefetch_count=9) + qos.decrement_eventually() + self.assertEqual(qos.value, 8) + mconsumer.qos.assert_called_with(prefetch_count=9) + self.assertIn({'prefetch_count': 9}, mconsumer.qos.call_args) + + # Does not decrement 0 value + qos.value = 0 + qos.decrement_eventually() + self.assertEqual(qos.value, 0) + qos.increment_eventually() + self.assertEqual(qos.value, 0) + + def test_consumer_decrement_eventually(self): + mconsumer = Mock() + qos = QoS(mconsumer.qos, 10) + qos.decrement_eventually() + self.assertEqual(qos.value, 9) + qos.value = 0 + qos.decrement_eventually() + self.assertEqual(qos.value, 0) + + def test_set(self): + mconsumer = Mock() + qos = QoS(mconsumer.qos, 10) + qos.set(12) + self.assertEqual(qos.prev, 12) + qos.set(qos.prev) diff --git a/kombu/tests/test_compat.py b/kombu/tests/test_compat.py new file mode 100644 
index 0000000..b081cf0 --- /dev/null +++ b/kombu/tests/test_compat.py @@ -0,0 +1,331 @@ +from __future__ import absolute_import + +from kombu import Connection, Exchange, Queue +from kombu import compat + +from .case import Case, Mock, patch +from .mocks import Transport, Channel + + +class test_misc(Case): + + def test_iterconsume(self): + + class MyConnection(object): + drained = 0 + + def drain_events(self, *args, **kwargs): + self.drained += 1 + return self.drained + + class Consumer(object): + active = False + + def consume(self, *args, **kwargs): + self.active = True + + conn = MyConnection() + consumer = Consumer() + it = compat._iterconsume(conn, consumer) + self.assertEqual(next(it), 1) + self.assertTrue(consumer.active) + + it2 = compat._iterconsume(conn, consumer, limit=10) + self.assertEqual(list(it2), [2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + + def test_Queue_from_dict(self): + defs = {'binding_key': 'foo.#', + 'exchange': 'fooex', + 'exchange_type': 'topic', + 'durable': True, + 'auto_delete': False} + + q1 = Queue.from_dict('foo', **dict(defs)) + self.assertEqual(q1.name, 'foo') + self.assertEqual(q1.routing_key, 'foo.#') + self.assertEqual(q1.exchange.name, 'fooex') + self.assertEqual(q1.exchange.type, 'topic') + self.assertTrue(q1.durable) + self.assertTrue(q1.exchange.durable) + self.assertFalse(q1.auto_delete) + self.assertFalse(q1.exchange.auto_delete) + + q2 = Queue.from_dict('foo', **dict(defs, + exchange_durable=False)) + self.assertTrue(q2.durable) + self.assertFalse(q2.exchange.durable) + + q3 = Queue.from_dict('foo', **dict(defs, + exchange_auto_delete=True)) + self.assertFalse(q3.auto_delete) + self.assertTrue(q3.exchange.auto_delete) + + q4 = Queue.from_dict('foo', **dict(defs, + queue_durable=False)) + self.assertFalse(q4.durable) + self.assertTrue(q4.exchange.durable) + + q5 = Queue.from_dict('foo', **dict(defs, + queue_auto_delete=True)) + self.assertTrue(q5.auto_delete) + self.assertFalse(q5.exchange.auto_delete) + + self.assertEqual(Queue.from_dict('foo', **dict(defs)), + Queue.from_dict('foo', **dict(defs))) + + +class test_Publisher(Case): + + def setUp(self): + self.connection = Connection(transport=Transport) + + def test_constructor(self): + pub = compat.Publisher(self.connection, + exchange='test_Publisher_constructor', + routing_key='rkey') + self.assertIsInstance(pub.backend, Channel) + self.assertEqual(pub.exchange.name, 'test_Publisher_constructor') + self.assertTrue(pub.exchange.durable) + self.assertFalse(pub.exchange.auto_delete) + self.assertEqual(pub.exchange.type, 'direct') + + pub2 = compat.Publisher(self.connection, + exchange='test_Publisher_constructor2', + routing_key='rkey', + auto_delete=True, + durable=False) + self.assertTrue(pub2.exchange.auto_delete) + self.assertFalse(pub2.exchange.durable) + + explicit = Exchange('test_Publisher_constructor_explicit', + type='topic') + pub3 = compat.Publisher(self.connection, + exchange=explicit) + self.assertEqual(pub3.exchange, explicit) + + compat.Publisher(self.connection, + exchange='test_Publisher_constructor3', + channel=self.connection.default_channel) + + def test_send(self): + pub = compat.Publisher(self.connection, + exchange='test_Publisher_send', + routing_key='rkey') + pub.send({'foo': 'bar'}) + self.assertIn('basic_publish', pub.backend) + pub.close() + + def test__enter__exit__(self): + pub = compat.Publisher(self.connection, + exchange='test_Publisher_send', + routing_key='rkey') + x = pub.__enter__() + self.assertIs(x, pub) + x.__exit__() + self.assertTrue(pub._closed) + + +class 
test_Consumer(Case): + + def setUp(self): + self.connection = Connection(transport=Transport) + + @patch('kombu.compat._iterconsume') + def test_iterconsume_calls__iterconsume(self, it, n='test_iterconsume'): + c = compat.Consumer(self.connection, queue=n, exchange=n) + c.iterconsume(limit=10, no_ack=True) + it.assert_called_with(c.connection, c, True, 10) + + def test_constructor(self, n='test_Consumer_constructor'): + c = compat.Consumer(self.connection, queue=n, exchange=n, + routing_key='rkey') + self.assertIsInstance(c.backend, Channel) + q = c.queues[0] + self.assertTrue(q.durable) + self.assertTrue(q.exchange.durable) + self.assertFalse(q.auto_delete) + self.assertFalse(q.exchange.auto_delete) + self.assertEqual(q.name, n) + self.assertEqual(q.exchange.name, n) + + c2 = compat.Consumer(self.connection, queue=n + '2', + exchange=n + '2', + routing_key='rkey', durable=False, + auto_delete=True, exclusive=True) + q2 = c2.queues[0] + self.assertFalse(q2.durable) + self.assertFalse(q2.exchange.durable) + self.assertTrue(q2.auto_delete) + self.assertTrue(q2.exchange.auto_delete) + + def test__enter__exit__(self, n='test__enter__exit__'): + c = compat.Consumer(self.connection, queue=n, exchange=n, + routing_key='rkey') + x = c.__enter__() + self.assertIs(x, c) + x.__exit__() + self.assertTrue(c._closed) + + def test_revive(self, n='test_revive'): + c = compat.Consumer(self.connection, queue=n, exchange=n) + + with self.connection.channel() as c2: + c.revive(c2) + self.assertIs(c.backend, c2) + + def test__iter__(self, n='test__iter__'): + c = compat.Consumer(self.connection, queue=n, exchange=n) + c.iterqueue = Mock() + + c.__iter__() + c.iterqueue.assert_called_with(infinite=True) + + def test_iter(self, n='test_iterqueue'): + c = compat.Consumer(self.connection, queue=n, exchange=n, + routing_key='rkey') + c.close() + + def test_process_next(self, n='test_process_next'): + c = compat.Consumer(self.connection, queue=n, exchange=n, + routing_key='rkey') + with self.assertRaises(NotImplementedError): + c.process_next() + c.close() + + def test_iterconsume(self, n='test_iterconsume'): + c = compat.Consumer(self.connection, queue=n, exchange=n, + routing_key='rkey') + c.close() + + def test_discard_all(self, n='test_discard_all'): + c = compat.Consumer(self.connection, queue=n, exchange=n, + routing_key='rkey') + c.discard_all() + self.assertIn('queue_purge', c.backend) + + def test_fetch(self, n='test_fetch'): + c = compat.Consumer(self.connection, queue=n, exchange=n, + routing_key='rkey') + self.assertIsNone(c.fetch()) + self.assertIsNone(c.fetch(no_ack=True)) + self.assertIn('basic_get', c.backend) + + callback_called = [False] + + def receive(payload, message): + callback_called[0] = True + + c.backend.to_deliver.append('42') + payload = c.fetch().payload + self.assertEqual(payload, '42') + c.backend.to_deliver.append('46') + c.register_callback(receive) + self.assertEqual(c.fetch(enable_callbacks=True).payload, '46') + self.assertTrue(callback_called[0]) + + def test_discard_all_filterfunc_not_supported(self, n='xjf21j21'): + c = compat.Consumer(self.connection, queue=n, exchange=n, + routing_key='rkey') + with self.assertRaises(NotImplementedError): + c.discard_all(filterfunc=lambda x: x) + c.close() + + def test_wait(self, n='test_wait'): + + class C(compat.Consumer): + + def iterconsume(self, limit=None): + for i in range(limit): + yield i + + c = C(self.connection, + queue=n, exchange=n, routing_key='rkey') + self.assertEqual(c.wait(10), list(range(10))) + c.close() + + def 
test_iterqueue(self, n='test_iterqueue'): + i = [0] + + class C(compat.Consumer): + + def fetch(self, limit=None): + z = i[0] + i[0] += 1 + return z + + c = C(self.connection, + queue=n, exchange=n, routing_key='rkey') + self.assertEqual(list(c.iterqueue(limit=10)), list(range(10))) + c.close() + + +class test_ConsumerSet(Case): + + def setUp(self): + self.connection = Connection(transport=Transport) + + def test_providing_channel(self): + chan = Mock(name='channel') + cs = compat.ConsumerSet(self.connection, channel=chan) + self.assertTrue(cs._provided_channel) + self.assertIs(cs.backend, chan) + + cs.cancel = Mock(name='cancel') + cs.close() + self.assertFalse(chan.close.called) + + @patch('kombu.compat._iterconsume') + def test_iterconsume(self, _iterconsume, n='test_iterconsume'): + c = compat.Consumer(self.connection, queue=n, exchange=n) + cs = compat.ConsumerSet(self.connection, consumers=[c]) + cs.iterconsume(limit=10, no_ack=True) + _iterconsume.assert_called_with(c.connection, cs, True, 10) + + def test_revive(self, n='test_revive'): + c = compat.Consumer(self.connection, queue=n, exchange=n) + cs = compat.ConsumerSet(self.connection, consumers=[c]) + + with self.connection.channel() as c2: + cs.revive(c2) + self.assertIs(cs.backend, c2) + + def test_constructor(self, prefix='0daf8h21'): + dcon = {'%s.xyx' % prefix: {'exchange': '%s.xyx' % prefix, + 'routing_key': 'xyx'}, + '%s.xyz' % prefix: {'exchange': '%s.xyz' % prefix, + 'routing_key': 'xyz'}} + consumers = [compat.Consumer(self.connection, queue=prefix + str(i), + exchange=prefix + str(i)) + for i in range(3)] + c = compat.ConsumerSet(self.connection, consumers=consumers) + c2 = compat.ConsumerSet(self.connection, from_dict=dcon) + + self.assertEqual(len(c.queues), 3) + self.assertEqual(len(c2.queues), 2) + + c.add_consumer(compat.Consumer(self.connection, + queue=prefix + 'xaxxxa', + exchange=prefix + 'xaxxxa')) + self.assertEqual(len(c.queues), 4) + for cq in c.queues: + self.assertIs(cq.channel, c.channel) + + c2.add_consumer_from_dict({ + '%s.xxx' % prefix: { + 'exchange': '%s.xxx' % prefix, + 'routing_key': 'xxx', + }, + }) + self.assertEqual(len(c2.queues), 3) + for c2q in c2.queues: + self.assertIs(c2q.channel, c2.channel) + + c.discard_all() + self.assertEqual(c.channel.called.count('queue_purge'), 4) + c.consume() + + c.close() + c2.close() + self.assertIn('basic_cancel', c.channel) + self.assertIn('close', c.channel) + self.assertIn('close', c2.channel) diff --git a/kombu/tests/test_compression.py b/kombu/tests/test_compression.py new file mode 100644 index 0000000..7d651ee --- /dev/null +++ b/kombu/tests/test_compression.py @@ -0,0 +1,50 @@ +from __future__ import absolute_import + +import sys + +from kombu import compression + +from .case import Case, SkipTest, mask_modules + + +class test_compression(Case): + + def setUp(self): + try: + import bz2 # noqa + except ImportError: + self.has_bzip2 = False + else: + self.has_bzip2 = True + + @mask_modules('bz2') + def test_no_bz2(self): + c = sys.modules.pop('kombu.compression') + try: + import kombu.compression + self.assertFalse(hasattr(kombu.compression, 'bz2')) + finally: + if c is not None: + sys.modules['kombu.compression'] = c + + def test_encoders(self): + encoders = compression.encoders() + self.assertIn('application/x-gzip', encoders) + if self.has_bzip2: + self.assertIn('application/x-bz2', encoders) + + def test_compress__decompress__zlib(self): + text = b'The Quick Brown Fox Jumps Over The Lazy Dog' + c, ctype = compression.compress(text, 'zlib') + 
self.assertNotEqual(text, c) + d = compression.decompress(c, ctype) + self.assertEqual(d, text) + + def test_compress__decompress__bzip2(self): + if not self.has_bzip2: + raise SkipTest('bzip2 not available') + text = b'The Brown Quick Fox Over The Lazy Dog Jumps' + c, ctype = compression.compress(text, 'bzip2') + self.assertNotEqual(text, c) + d = compression.decompress(c, ctype) + self.assertEqual(d, text) diff --git a/kombu/tests/test_connection.py b/kombu/tests/test_connection.py new file mode 100644 index 0000000..6bd3303 --- /dev/null +++ b/kombu/tests/test_connection.py @@ -0,0 +1,688 @@ +from __future__ import absolute_import + +import pickle +import socket + +from copy import copy + +from kombu import Connection, Consumer, Producer, parse_url +from kombu.connection import Resource +from kombu.five import items, range + +from .case import Case, Mock, SkipTest, patch, skip_if_not_module +from .mocks import Transport + + +class test_connection_utils(Case): + + def setUp(self): + self.url = 'amqp://user:pass@localhost:5672/my/vhost' + self.nopass = 'amqp://user:**@localhost:5672/my/vhost' + self.expected = { + 'transport': 'amqp', + 'userid': 'user', + 'password': 'pass', + 'hostname': 'localhost', + 'port': 5672, + 'virtual_host': 'my/vhost', + } + + def test_parse_url(self): + result = parse_url(self.url) + self.assertDictEqual(result, self.expected) + + def test_parse_generated_as_uri(self): + conn = Connection(self.url) + info = conn.info() + for k, v in self.expected.items(): + self.assertEqual(info[k], v) + # by default almost the same- no password + self.assertEqual(conn.as_uri(), self.nopass) + self.assertEqual(conn.as_uri(include_password=True), self.url) + + def test_as_uri_when_prefix(self): + conn = Connection('memory://') + conn.uri_prefix = 'foo' + self.assertTrue(conn.as_uri().startswith('foo+memory://')) + + @skip_if_not_module('pymongo') + def test_as_uri_when_mongodb(self): + x = Connection('mongodb://localhost') + self.assertTrue(x.as_uri()) + + def test_bogus_scheme(self): + with self.assertRaises(KeyError): + Connection('bogus://localhost:7421').transport + + def assert_info(self, conn, **fields): + info = conn.info() + for field, expected in items(fields): + self.assertEqual(info[field], expected) + + def test_rabbitmq_example_urls(self): + # see Appendix A of http://www.rabbitmq.com/uri-spec.html + + self.assert_info( + Connection('amqp://user:pass@host:10000/vhost'), + userid='user', password='pass', hostname='host', + port=10000, virtual_host='vhost', + ) + + self.assert_info( + Connection('amqp://user%61:%61pass@ho%61st:10000/v%2fhost'), + userid='usera', password='apass', hostname='hoast', + port=10000, virtual_host='v/host', + ) + + self.assert_info( + Connection('amqp://'), + userid='guest', password='guest', hostname='localhost', + port=5672, virtual_host='/', + ) + + self.assert_info( + Connection('amqp://:@/'), + userid='guest', password='guest', hostname='localhost', + port=5672, virtual_host='/', + ) + + self.assert_info( + Connection('amqp://user@/'), + userid='user', password='guest', hostname='localhost', + port=5672, virtual_host='/', + ) + + self.assert_info( + Connection('amqp://user:pass@/'), + userid='user', password='pass', hostname='localhost', + port=5672, virtual_host='/', + ) + + self.assert_info( + Connection('amqp://host'), + userid='guest', password='guest', hostname='host', + port=5672, virtual_host='/', + ) + + self.assert_info( + Connection('amqp://:10000'), + userid='guest', password='guest', hostname='localhost', + port=10000, 
virtual_host='/', + ) + + self.assert_info( + Connection('amqp:///vhost'), + userid='guest', password='guest', hostname='localhost', + port=5672, virtual_host='vhost', + ) + + self.assert_info( + Connection('amqp://host/'), + userid='guest', password='guest', hostname='host', + port=5672, virtual_host='/', + ) + + self.assert_info( + Connection('amqp://host/%2f'), + userid='guest', password='guest', hostname='host', + port=5672, virtual_host='/', + ) + + def test_url_IPV6(self): + raise SkipTest("urllib can't parse ipv6 urls") + + self.assert_info( + Connection('amqp://[::1]'), + userid='guest', password='guest', hostname='[::1]', + port=5672, virtual_host='/', + ) + + +class test_Connection(Case): + + def setUp(self): + self.conn = Connection(port=5672, transport=Transport) + + def test_establish_connection(self): + conn = self.conn + conn.connect() + self.assertTrue(conn.connection.connected) + self.assertEqual(conn.host, 'localhost:5672') + channel = conn.channel() + self.assertTrue(channel.open) + self.assertEqual(conn.drain_events(), 'event') + _connection = conn.connection + conn.close() + self.assertFalse(_connection.connected) + self.assertIsInstance(conn.transport, Transport) + + def test_multiple_urls(self): + conn1 = Connection('amqp://foo;amqp://bar') + self.assertEqual(conn1.hostname, 'foo') + self.assertListEqual(conn1.alt, ['amqp://foo', 'amqp://bar']) + + conn2 = Connection(['amqp://foo', 'amqp://bar']) + self.assertEqual(conn2.hostname, 'foo') + self.assertListEqual(conn2.alt, ['amqp://foo', 'amqp://bar']) + + def test_collect(self): + connection = Connection('memory://') + trans = connection._transport = Mock(name='transport') + _collect = trans._collect = Mock(name='transport._collect') + _close = connection._close = Mock(name='connection._close') + connection.declared_entities = Mock(name='decl_entities') + uconn = connection._connection = Mock(name='_connection') + connection.collect() + + self.assertFalse(_close.called) + _collect.assert_called_with(uconn) + connection.declared_entities.clear.assert_called_with() + self.assertIsNone(trans.client) + self.assertIsNone(connection._transport) + self.assertIsNone(connection._connection) + + def test_collect_no_transport(self): + connection = Connection('memory://') + connection._transport = None + connection._close = Mock() + connection.collect() + connection._close.assert_called_with() + + connection._close.side_effect = socket.timeout() + connection.collect() + + def test_collect_transport_gone(self): + connection = Connection('memory://') + uconn = connection._connection = Mock(name='conn._conn') + trans = connection._transport = Mock(name='transport') + collect = trans._collect = Mock(name='transport._collect') + + def se(conn): + connection._transport = None + collect.side_effect = se + + connection.collect() + collect.assert_called_with(uconn) + self.assertIsNone(connection._transport) + + def test_uri_passthrough(self): + transport = Mock(name='transport') + with patch('kombu.connection.get_transport_cls') as gtc: + gtc.return_value = transport + transport.can_parse_url = True + with patch('kombu.connection.parse_url') as parse_url: + c = Connection('foo+mysql://some_host') + self.assertEqual(c.transport_cls, 'foo') + self.assertFalse(parse_url.called) + self.assertEqual(c.hostname, 'mysql://some_host') + self.assertTrue(c.as_uri().startswith('foo+')) + with patch('kombu.connection.parse_url') as parse_url: + c = Connection('mysql://some_host', transport='foo') + self.assertEqual(c.transport_cls, 'foo') + 
self.assertFalse(parse_url.called) + self.assertEqual(c.hostname, 'mysql://some_host') + c = Connection('pyamqp+sqlite://some_host') + self.assertTrue(c.as_uri().startswith('pyamqp+')) + + def test_default_ensure_callback(self): + with patch('kombu.connection.logger') as logger: + c = Connection(transport=Mock) + c._default_ensure_callback(KeyError(), 3) + self.assertTrue(logger.error.called) + + def test_ensure_connection_on_error(self): + c = Connection('amqp://A;amqp://B') + with patch('kombu.connection.retry_over_time') as rot: + c.ensure_connection() + self.assertTrue(rot.called) + + args = rot.call_args[0] + cb = args[4] + intervals = iter([1, 2, 3, 4, 5]) + self.assertEqual(cb(KeyError(), intervals, 0), 0) + self.assertEqual(cb(KeyError(), intervals, 1), 1) + self.assertEqual(cb(KeyError(), intervals, 2), 0) + self.assertEqual(cb(KeyError(), intervals, 3), 2) + self.assertEqual(cb(KeyError(), intervals, 4), 0) + self.assertEqual(cb(KeyError(), intervals, 5), 3) + self.assertEqual(cb(KeyError(), intervals, 6), 0) + self.assertEqual(cb(KeyError(), intervals, 7), 4) + + errback = Mock() + c.ensure_connection(errback=errback) + args = rot.call_args[0] + cb = args[4] + self.assertEqual(cb(KeyError(), intervals, 0), 0) + self.assertTrue(errback.called) + + def test_supports_heartbeats(self): + c = Connection(transport=Mock) + c.transport.supports_heartbeats = False + self.assertFalse(c.supports_heartbeats) + + def test_is_evented(self): + c = Connection(transport=Mock) + c.transport.supports_ev = False + self.assertFalse(c.is_evented) + + def test_register_with_event_loop(self): + c = Connection(transport=Mock) + loop = Mock(name='loop') + c.register_with_event_loop(loop) + c.transport.register_with_event_loop.assert_called_with( + c.connection, loop, + ) + + def test_manager(self): + c = Connection(transport=Mock) + self.assertIs(c.manager, c.transport.manager) + + def test_copy(self): + c = Connection('amqp://example.com') + self.assertEqual(copy(c).info(), c.info()) + + def test_copy_multiples(self): + c = Connection('amqp://A.example.com;amqp://B.example.com') + self.assertTrue(c.alt) + d = copy(c) + self.assertEqual(d.alt, c.alt) + + def test_switch(self): + c = Connection('amqp://foo') + c._closed = True + c.switch('redis://example.com//3') + self.assertFalse(c._closed) + self.assertEqual(c.hostname, 'example.com') + self.assertEqual(c.transport_cls, 'redis') + self.assertEqual(c.virtual_host, '/3') + + def test_maybe_switch_next(self): + c = Connection('amqp://foo;redis://example.com//3') + c.maybe_switch_next() + self.assertFalse(c._closed) + self.assertEqual(c.hostname, 'example.com') + self.assertEqual(c.transport_cls, 'redis') + self.assertEqual(c.virtual_host, '/3') + + def test_maybe_switch_next_no_cycle(self): + c = Connection('amqp://foo') + c.maybe_switch_next() + self.assertFalse(c._closed) + self.assertEqual(c.hostname, 'foo') + self.assertIn(c.transport_cls, ('librabbitmq', 'pyamqp', 'amqp')) + + def test_heartbeat_check(self): + c = Connection(transport=Transport) + c.transport.heartbeat_check = Mock() + c.heartbeat_check(3) + c.transport.heartbeat_check.assert_called_with(c.connection, rate=3) + + def test_completes_cycle_no_cycle(self): + c = Connection('amqp://') + self.assertTrue(c.completes_cycle(0)) + self.assertTrue(c.completes_cycle(1)) + + def test_completes_cycle(self): + c = Connection('amqp://a;amqp://b;amqp://c') + self.assertFalse(c.completes_cycle(0)) + self.assertFalse(c.completes_cycle(1)) + self.assertTrue(c.completes_cycle(2)) + + def 
test__enter____exit__(self): + conn = self.conn + context = conn.__enter__() + self.assertIs(context, conn) + conn.connect() + self.assertTrue(conn.connection.connected) + conn.__exit__() + self.assertIsNone(conn.connection) + conn.close() # again + + def test_close_survives_connerror(self): + + class _CustomError(Exception): + pass + + class MyTransport(Transport): + connection_errors = (_CustomError, ) + + def close_connection(self, connection): + raise _CustomError('foo') + + conn = Connection(transport=MyTransport) + conn.connect() + conn.close() + self.assertTrue(conn._closed) + + def test_close_when_default_channel(self): + conn = self.conn + conn._default_channel = Mock() + conn._close() + conn._default_channel.close.assert_called_with() + + def test_close_when_default_channel_close_raises(self): + + class Conn(Connection): + + @property + def connection_errors(self): + return (KeyError, ) + + conn = Conn('memory://') + conn._default_channel = Mock() + conn._default_channel.close.side_effect = KeyError() + + conn._close() + conn._default_channel.close.assert_called_with() + + def test_revive_when_default_channel(self): + conn = self.conn + defchan = conn._default_channel = Mock() + conn.revive(Mock()) + + defchan.close.assert_called_with() + self.assertIsNone(conn._default_channel) + + def test_ensure_connection(self): + self.assertTrue(self.conn.ensure_connection()) + + def test_ensure_success(self): + def publish(): + return 'foobar' + + ensured = self.conn.ensure(None, publish) + self.assertEqual(ensured(), 'foobar') + + def test_ensure_failure(self): + class _CustomError(Exception): + pass + + def publish(): + raise _CustomError('bar') + + ensured = self.conn.ensure(None, publish) + with self.assertRaises(_CustomError): + ensured() + + def test_ensure_connection_failure(self): + class _ConnectionError(Exception): + pass + + def publish(): + raise _ConnectionError('failed connection') + + self.conn.transport.connection_errors = (_ConnectionError,) + ensured = self.conn.ensure(self.conn, publish) + with self.assertRaises(_ConnectionError): + ensured() + + def test_autoretry(self): + myfun = Mock() + myfun.__name__ = 'test_autoretry' + + self.conn.transport.connection_errors = (KeyError, ) + + def on_call(*args, **kwargs): + myfun.side_effect = None + raise KeyError('foo') + + myfun.side_effect = on_call + insured = self.conn.autoretry(myfun) + insured() + + self.assertTrue(myfun.called) + + def test_SimpleQueue(self): + conn = self.conn + q = conn.SimpleQueue('foo') + self.assertIs(q.channel, conn.default_channel) + chan = conn.channel() + q2 = conn.SimpleQueue('foo', channel=chan) + self.assertIs(q2.channel, chan) + + def test_SimpleBuffer(self): + conn = self.conn + q = conn.SimpleBuffer('foo') + self.assertIs(q.channel, conn.default_channel) + chan = conn.channel() + q2 = conn.SimpleBuffer('foo', channel=chan) + self.assertIs(q2.channel, chan) + + def test_Producer(self): + conn = self.conn + self.assertIsInstance(conn.Producer(), Producer) + self.assertIsInstance(conn.Producer(conn.default_channel), Producer) + + def test_Consumer(self): + conn = self.conn + self.assertIsInstance(conn.Consumer(queues=[]), Consumer) + self.assertIsInstance(conn.Consumer(queues=[], + channel=conn.default_channel), Consumer) + + def test__repr__(self): + self.assertTrue(repr(self.conn)) + + def test__reduce__(self): + x = pickle.loads(pickle.dumps(self.conn)) + self.assertDictEqual(x.info(), self.conn.info()) + + def test_channel_errors(self): + + class MyTransport(Transport): + channel_errors = 
(KeyError, ValueError) + + conn = Connection(transport=MyTransport) + self.assertTupleEqual(conn.channel_errors, (KeyError, ValueError)) + + def test_connection_errors(self): + + class MyTransport(Transport): + connection_errors = (KeyError, ValueError) + + conn = Connection(transport=MyTransport) + self.assertTupleEqual(conn.connection_errors, (KeyError, ValueError)) + + +class test_Connection_with_transport_options(Case): + + transport_options = {'pool_recycler': 3600, 'echo': True} + + def setUp(self): + self.conn = Connection(port=5672, transport=Transport, + transport_options=self.transport_options) + + def test_establish_connection(self): + conn = self.conn + self.assertEqual(conn.transport_options, self.transport_options) + + +class xResource(Resource): + + def setup(self): + pass + + +class ResourceCase(Case): + abstract = True + + def create_resource(self, limit, preload): + raise NotImplementedError('subclass responsibility') + + def assertState(self, P, avail, dirty): + self.assertEqual(P._resource.qsize(), avail) + self.assertEqual(len(P._dirty), dirty) + + def test_setup(self): + if self.abstract: + with self.assertRaises(NotImplementedError): + Resource() + + def test_acquire__release(self): + if self.abstract: + return + P = self.create_resource(10, 0) + self.assertState(P, 10, 0) + chans = [P.acquire() for _ in range(10)] + self.assertState(P, 0, 10) + with self.assertRaises(P.LimitExceeded): + P.acquire() + chans.pop().release() + self.assertState(P, 1, 9) + [chan.release() for chan in chans] + self.assertState(P, 10, 0) + + def test_acquire_prepare_raises(self): + if self.abstract: + return + P = self.create_resource(10, 0) + + self.assertEqual(len(P._resource.queue), 10) + P.prepare = Mock() + P.prepare.side_effect = IOError() + with self.assertRaises(IOError): + P.acquire(block=True) + self.assertEqual(len(P._resource.queue), 10) + + def test_acquire_no_limit(self): + if self.abstract: + return + P = self.create_resource(None, 0) + P.acquire().release() + + def test_replace_when_limit(self): + if self.abstract: + return + P = self.create_resource(10, 0) + r = P.acquire() + P._dirty = Mock() + P.close_resource = Mock() + + P.replace(r) + P._dirty.discard.assert_called_with(r) + P.close_resource.assert_called_with(r) + + def test_replace_no_limit(self): + if self.abstract: + return + P = self.create_resource(None, 0) + r = P.acquire() + P._dirty = Mock() + P.close_resource = Mock() + + P.replace(r) + self.assertFalse(P._dirty.discard.called) + P.close_resource.assert_called_with(r) + + def test_interface_prepare(self): + if not self.abstract: + return + x = xResource() + self.assertEqual(x.prepare(10), 10) + + def test_force_close_all_handles_AttributeError(self): + if self.abstract: + return + P = self.create_resource(10, 10) + cr = P.collect_resource = Mock() + cr.side_effect = AttributeError('x') + + P.acquire() + self.assertTrue(P._dirty) + + P.force_close_all() + + def test_force_close_all_no_mutex(self): + if self.abstract: + return + P = self.create_resource(10, 10) + P.close_resource = Mock() + + m = P._resource = Mock() + m.mutex = None + m.queue.pop.side_effect = IndexError + + P.force_close_all() + + def test_add_when_empty(self): + if self.abstract: + return + P = self.create_resource(None, None) + P._resource.queue[:] = [] + self.assertFalse(P._resource.queue) + P._add_when_empty() + self.assertTrue(P._resource.queue) + + +class test_ConnectionPool(ResourceCase): + abstract = False + + def create_resource(self, limit, preload): + return Connection(port=5672, 
transport=Transport).Pool(limit, preload) + + def test_setup(self): + P = self.create_resource(10, 2) + q = P._resource.queue + self.assertIsNotNone(q[0]._connection) + self.assertIsNotNone(q[1]._connection) + self.assertIsNone(q[2]()._connection) + + def test_acquire_raises_evaluated(self): + P = self.create_resource(1, 0) + # evaluate the connection first + r = P.acquire() + r.release() + P.prepare = Mock() + P.prepare.side_effect = MemoryError() + P.release = Mock() + with self.assertRaises(MemoryError): + with P.acquire(): + assert False + P.release.assert_called_with(r) + + def test_release_no__debug(self): + P = self.create_resource(10, 2) + R = Mock() + R._debug.side_effect = AttributeError() + P.release_resource(R) + + def test_setup_no_limit(self): + P = self.create_resource(None, None) + self.assertFalse(P._resource.queue) + self.assertIsNone(P.limit) + + def test_prepare_not_callable(self): + P = self.create_resource(None, None) + conn = Connection('memory://') + self.assertIs(P.prepare(conn), conn) + + def test_acquire_channel(self): + P = self.create_resource(10, 0) + with P.acquire_channel() as (conn, channel): + self.assertIs(channel, conn.default_channel) + + +class test_ChannelPool(ResourceCase): + abstract = False + + def create_resource(self, limit, preload): + return Connection(port=5672, transport=Transport) \ + .ChannelPool(limit, preload) + + def test_setup(self): + P = self.create_resource(10, 2) + q = P._resource.queue + self.assertTrue(q[0].basic_consume) + self.assertTrue(q[1].basic_consume) + with self.assertRaises(AttributeError): + getattr(q[2], 'basic_consume') + + def test_setup_no_limit(self): + P = self.create_resource(None, None) + self.assertFalse(P._resource.queue) + self.assertIsNone(P.limit) + + def test_prepare_not_callable(self): + P = self.create_resource(10, 0) + conn = Connection('memory://') + chan = conn.default_channel + self.assertIs(P.prepare(chan), chan) diff --git a/kombu/tests/test_entities.py b/kombu/tests/test_entities.py new file mode 100644 index 0000000..ef7591a --- /dev/null +++ b/kombu/tests/test_entities.py @@ -0,0 +1,366 @@ +from __future__ import absolute_import + +import pickle + +from kombu import Connection, Exchange, Producer, Queue, binding +from kombu.exceptions import NotBoundError + +from .case import Case, Mock, call +from .mocks import Transport + + +def get_conn(): + return Connection(transport=Transport) + + +class test_binding(Case): + + def test_constructor(self): + x = binding( + Exchange('foo'), 'rkey', + arguments={'barg': 'bval'}, + unbind_arguments={'uarg': 'uval'}, + ) + self.assertEqual(x.exchange, Exchange('foo')) + self.assertEqual(x.routing_key, 'rkey') + self.assertDictEqual(x.arguments, {'barg': 'bval'}) + self.assertDictEqual(x.unbind_arguments, {'uarg': 'uval'}) + + def test_declare(self): + chan = get_conn().channel() + x = binding(Exchange('foo'), 'rkey') + x.declare(chan) + self.assertIn('exchange_declare', chan) + + def test_declare_no_exchange(self): + chan = get_conn().channel() + x = binding() + x.declare(chan) + self.assertNotIn('exchange_declare', chan) + + def test_bind(self): + chan = get_conn().channel() + x = binding(Exchange('foo')) + x.bind(Exchange('bar')(chan)) + self.assertIn('exchange_bind', chan) + + def test_unbind(self): + chan = get_conn().channel() + x = binding(Exchange('foo')) + x.unbind(Exchange('bar')(chan)) + self.assertIn('exchange_unbind', chan) + + def test_repr(self): + b = binding(Exchange('foo'), 'rkey') + self.assertIn('foo', repr(b)) + self.assertIn('rkey', 
repr(b)) + + +class test_Exchange(Case): + + def test_bound(self): + exchange = Exchange('foo', 'direct') + self.assertFalse(exchange.is_bound) + self.assertIn('= 1: + self.c.should_stop = True + counter[0] += 1 + return counter + self.c.should_stop = False + consume.side_effect = se + self.c.run() + self.assertTrue(sleep.called) + + def test_run_raises(self): + conn = ContextMock(name='connection') + self.c.connection = conn + conn.connection_errors = (KeyError, ) + conn.channel_errors = () + consume = self.c.consume = Mock(name='c.consume') + + with patch('kombu.mixins.warn') as warn: + def se_raises(*args, **kwargs): + self.c.should_stop = True + raise KeyError('foo') + self.c.should_stop = False + consume.side_effect = se_raises + self.c.run() + self.assertTrue(warn.called) diff --git a/kombu/tests/test_pidbox.py b/kombu/tests/test_pidbox.py new file mode 100644 index 0000000..357de65 --- /dev/null +++ b/kombu/tests/test_pidbox.py @@ -0,0 +1,287 @@ +from __future__ import absolute_import + +import socket +import warnings + +from kombu import Connection +from kombu import pidbox +from kombu.exceptions import ContentDisallowed, InconsistencyError +from kombu.utils import uuid + +from .case import Case, Mock, patch + + +class test_Mailbox(Case): + + def _handler(self, state): + return self.stats['var'] + + def setUp(self): + + class Mailbox(pidbox.Mailbox): + + def _collect(self, *args, **kwargs): + return 'COLLECTED' + + self.mailbox = Mailbox('test_pidbox') + self.connection = Connection(transport='memory') + self.state = {'var': 1} + self.handlers = {'mymethod': self._handler} + self.bound = self.mailbox(self.connection) + self.default_chan = self.connection.channel() + self.node = self.bound.Node( + 'test_pidbox', + state=self.state, handlers=self.handlers, + channel=self.default_chan, + ) + + def test_publish_reply_ignores_InconsistencyError(self): + mailbox = pidbox.Mailbox('test_reply__collect')(self.connection) + with patch('kombu.pidbox.Producer') as Producer: + producer = Producer.return_value = Mock(name='producer') + producer.publish.side_effect = InconsistencyError() + mailbox._publish_reply( + {'foo': 'bar'}, mailbox.reply_exchange, mailbox.oid, 'foo', + ) + self.assertTrue(producer.publish.called) + + def test_reply__collect(self): + mailbox = pidbox.Mailbox('test_reply__collect')(self.connection) + exchange = mailbox.reply_exchange.name + channel = self.connection.channel() + mailbox.reply_queue(channel).declare() + + ticket = uuid() + mailbox._publish_reply({'foo': 'bar'}, exchange, mailbox.oid, ticket) + _callback_called = [False] + + def callback(body): + _callback_called[0] = True + + reply = mailbox._collect(ticket, limit=1, + callback=callback, channel=channel) + self.assertEqual(reply, [{'foo': 'bar'}]) + self.assertTrue(_callback_called[0]) + + ticket = uuid() + mailbox._publish_reply({'biz': 'boz'}, exchange, mailbox.oid, ticket) + reply = mailbox._collect(ticket, limit=1, channel=channel) + self.assertEqual(reply, [{'biz': 'boz'}]) + + mailbox._publish_reply({'foo': 'BAM'}, exchange, mailbox.oid, 'doom', + serializer='pickle') + with self.assertRaises(ContentDisallowed): + reply = mailbox._collect('doom', limit=1, channel=channel) + mailbox._publish_reply( + {'foo': 'BAMBAM'}, exchange, mailbox.oid, 'doom', + serializer='pickle', + ) + reply = mailbox._collect('doom', limit=1, channel=channel, + accept=['pickle']) + self.assertEqual(reply[0]['foo'], 'BAMBAM') + + de = mailbox.connection.drain_events = Mock() + de.side_effect = socket.timeout + 
mailbox._collect(ticket, limit=1, channel=channel) + + def test_constructor(self): + self.assertIsNone(self.mailbox.connection) + self.assertTrue(self.mailbox.exchange.name) + self.assertTrue(self.mailbox.reply_exchange.name) + + def test_bound(self): + bound = self.mailbox(self.connection) + self.assertIs(bound.connection, self.connection) + + def test_Node(self): + self.assertTrue(self.node.hostname) + self.assertTrue(self.node.state) + self.assertIs(self.node.mailbox, self.bound) + self.assertTrue(self.handlers) + + # No initial handlers + node2 = self.bound.Node('test_pidbox2', state=self.state) + self.assertDictEqual(node2.handlers, {}) + + def test_Node_consumer(self): + consumer1 = self.node.Consumer() + self.assertIs(consumer1.channel, self.default_chan) + self.assertTrue(consumer1.no_ack) + + chan2 = self.connection.channel() + consumer2 = self.node.Consumer(channel=chan2, no_ack=False) + self.assertIs(consumer2.channel, chan2) + self.assertFalse(consumer2.no_ack) + + def test_Node_consumer_multiple_listeners(self): + warnings.resetwarnings() + consumer = self.node.Consumer() + q = consumer.queues[0] + with warnings.catch_warnings(record=True) as log: + q.on_declared('foo', 1, 1) + self.assertTrue(log) + self.assertIn('already using this', log[0].message.args[0]) + + with warnings.catch_warnings(record=True) as log: + q.on_declared('foo', 1, 0) + self.assertFalse(log) + + def test_handler(self): + node = self.bound.Node('test_handler', state=self.state) + + @node.handler + def my_handler_name(state): + return 42 + + self.assertIn('my_handler_name', node.handlers) + + def test_dispatch(self): + node = self.bound.Node('test_dispatch', state=self.state) + + @node.handler + def my_handler_name(state, x=None, y=None): + return x + y + + self.assertEqual(node.dispatch('my_handler_name', + arguments={'x': 10, 'y': 10}), 20) + + def test_dispatch_raising_SystemExit(self): + node = self.bound.Node('test_dispatch_raising_SystemExit', + state=self.state) + + @node.handler + def my_handler_name(state): + raise SystemExit + + with self.assertRaises(SystemExit): + node.dispatch('my_handler_name') + + def test_dispatch_raising(self): + node = self.bound.Node('test_dispatch_raising', state=self.state) + + @node.handler + def my_handler_name(state): + raise KeyError('foo') + + res = node.dispatch('my_handler_name') + self.assertIn('error', res) + self.assertIn('KeyError', res['error']) + + def test_dispatch_replies(self): + _replied = [False] + + def reply(data, **options): + _replied[0] = True + + node = self.bound.Node('test_dispatch', state=self.state) + node.reply = reply + + @node.handler + def my_handler_name(state, x=None, y=None): + return x + y + + node.dispatch('my_handler_name', + arguments={'x': 10, 'y': 10}, + reply_to={'exchange': 'foo', 'routing_key': 'bar'}) + self.assertTrue(_replied[0]) + + def test_reply(self): + _replied = [(None, None, None)] + + def publish_reply(data, exchange, routing_key, ticket, **kwargs): + _replied[0] = (data, exchange, routing_key, ticket) + + mailbox = self.mailbox(self.connection) + mailbox._publish_reply = publish_reply + node = mailbox.Node('test_reply') + + @node.handler + def my_handler_name(state): + return 42 + + node.dispatch('my_handler_name', + reply_to={'exchange': 'exchange', + 'routing_key': 'rkey'}, + ticket='TICKET') + data, exchange, routing_key, ticket = _replied[0] + self.assertEqual(data, {'test_reply': 42}) + self.assertEqual(exchange, 'exchange') + self.assertEqual(routing_key, 'rkey') + self.assertEqual(ticket, 'TICKET') + + def 
test_handle_message(self): + node = self.bound.Node('test_dispatch_from_message') + + @node.handler + def my_handler_name(state, x=None, y=None): + return x * y + + body = {'method': 'my_handler_name', + 'arguments': {'x': 64, 'y': 64}} + + self.assertEqual(node.handle_message(body, None), 64 * 64) + + # message not for me should not be processed. + body['destination'] = ['some_other_node'] + self.assertIsNone(node.handle_message(body, None)) + + def test_handle_message_adjusts_clock(self): + node = self.bound.Node('test_adjusts_clock') + + @node.handler + def my_handler_name(state): + return 10 + + body = {'method': 'my_handler_name', + 'arguments': {}} + message = Mock(name='message') + message.headers = {'clock': 313} + node.adjust_clock = Mock(name='adjust_clock') + res = node.handle_message(body, message) + node.adjust_clock.assert_called_with(313) + self.assertEqual(res, 10) + + def test_listen(self): + consumer = self.node.listen() + self.assertEqual(consumer.callbacks[0], + self.node.handle_message) + self.assertEqual(consumer.channel, self.default_chan) + + def test_cast(self): + self.bound.cast(['somenode'], 'mymethod') + consumer = self.node.Consumer() + self.assertIsCast(self.get_next(consumer)) + + def test_abcast(self): + self.bound.abcast('mymethod') + consumer = self.node.Consumer() + self.assertIsCast(self.get_next(consumer)) + + def test_call_destination_must_be_sequence(self): + with self.assertRaises(ValueError): + self.bound.call('some_node', 'mymethod') + + def test_call(self): + self.assertEqual( + self.bound.call(['some_node'], 'mymethod'), + 'COLLECTED', + ) + consumer = self.node.Consumer() + self.assertIsCall(self.get_next(consumer)) + + def test_multi_call(self): + self.assertEqual(self.bound.multi_call('mymethod'), 'COLLECTED') + consumer = self.node.Consumer() + self.assertIsCall(self.get_next(consumer)) + + def get_next(self, consumer): + m = consumer.queues[0].get() + if m: + return m.payload + + def assertIsCast(self, message): + self.assertTrue(message['method']) + + def assertIsCall(self, message): + self.assertTrue(message['method']) + self.assertTrue(message['reply_to']) diff --git a/kombu/tests/test_pools.py b/kombu/tests/test_pools.py new file mode 100644 index 0000000..920c65a --- /dev/null +++ b/kombu/tests/test_pools.py @@ -0,0 +1,239 @@ +from __future__ import absolute_import + +from kombu import Connection, Producer +from kombu import pools +from kombu.connection import ConnectionPool +from kombu.utils import eqhash + +from .case import Case, Mock + + +class test_ProducerPool(Case): + Pool = pools.ProducerPool + + class MyPool(pools.ProducerPool): + + def __init__(self, *args, **kwargs): + self.instance = Mock() + pools.ProducerPool.__init__(self, *args, **kwargs) + + def Producer(self, connection): + return self.instance + + def setUp(self): + self.connections = Mock() + self.pool = self.Pool(self.connections, limit=10) + + def test_close_resource(self): + self.pool.close_resource(Mock(name='resource')) + + def test_releases_connection_when_Producer_raises(self): + self.pool.Producer = Mock() + self.pool.Producer.side_effect = IOError() + acq = self.pool._acquire_connection = Mock() + conn = acq.return_value = Mock() + with self.assertRaises(IOError): + self.pool.create_producer() + conn.release.assert_called_with() + + def test_prepare_release_connection_on_error(self): + pp = Mock() + p = pp.return_value = Mock() + p.revive.side_effect = IOError() + acq = self.pool._acquire_connection = Mock() + conn = acq.return_value = Mock() + p._channel = 
None + with self.assertRaises(IOError): + self.pool.prepare(pp) + conn.release.assert_called_with() + + def test_release_releases_connection(self): + p = Mock() + p.__connection__ = Mock() + self.pool.release(p) + p.__connection__.release.assert_called_with() + p.__connection__ = None + self.pool.release(p) + + def test_init(self): + self.assertIs(self.pool.connections, self.connections) + + def test_Producer(self): + self.assertIsInstance(self.pool.Producer(Mock()), Producer) + + def test_acquire_connection(self): + self.pool._acquire_connection() + self.connections.acquire.assert_called_with(block=True) + + def test_new(self): + promise = self.pool.new() + producer = promise() + self.assertIsInstance(producer, Producer) + self.connections.acquire.assert_called_with(block=True) + + def test_setup_unlimited(self): + pool = self.Pool(self.connections, limit=None) + pool.setup() + self.assertFalse(pool._resource.queue) + + def test_setup(self): + self.assertEqual(len(self.pool._resource.queue), self.pool.limit) + + first = self.pool._resource.get_nowait() + producer = first() + self.assertIsInstance(producer, Producer) + + def test_prepare(self): + connection = self.connections.acquire.return_value = Mock() + pool = self.MyPool(self.connections, limit=10) + pool.instance._channel = None + first = pool._resource.get_nowait() + producer = pool.prepare(first) + self.assertTrue(self.connections.acquire.called) + producer.revive.assert_called_with(connection) + + def test_prepare_channel_already_created(self): + self.connections.acquire.return_value = Mock() + pool = self.MyPool(self.connections, limit=10) + pool.instance._channel = Mock() + first = pool._resource.get_nowait() + self.connections.acquire.reset() + producer = pool.prepare(first) + self.assertFalse(producer.revive.called) + + def test_prepare_not_callable(self): + x = Producer(Mock) + self.pool.prepare(x) + + def test_release(self): + p = Mock() + p.channel = Mock() + p.__connection__ = Mock() + self.pool.release(p) + p.__connection__.release.assert_called_with() + self.assertIsNone(p.channel) + + +class test_PoolGroup(Case): + Group = pools.PoolGroup + + class MyGroup(pools.PoolGroup): + + def create(self, resource, limit): + return resource, limit + + def test_interface_create(self): + g = self.Group() + with self.assertRaises(NotImplementedError): + g.create(Mock(), 10) + + def test_getitem_using_global_limit(self): + pools._used[0] = False + g = self.MyGroup(limit=pools.use_global_limit) + res = g['foo'] + self.assertTupleEqual(res, ('foo', pools.get_limit())) + self.assertTrue(pools._used[0]) + + def test_getitem_using_custom_limit(self): + pools._used[0] = True + g = self.MyGroup(limit=102456) + res = g['foo'] + self.assertTupleEqual(res, ('foo', 102456)) + + def test_delitem(self): + g = self.MyGroup() + g['foo'] + del(g['foo']) + self.assertNotIn('foo', g) + + def test_Connections(self): + conn = Connection('memory://') + p = pools.connections[conn] + self.assertTrue(p) + self.assertIsInstance(p, ConnectionPool) + self.assertIs(p.connection, conn) + self.assertEqual(p.limit, pools.get_limit()) + + def test_Producers(self): + conn = Connection('memory://') + p = pools.producers[conn] + self.assertTrue(p) + self.assertIsInstance(p, pools.ProducerPool) + self.assertIs(p.connections, pools.connections[conn]) + self.assertEqual(p.limit, p.connections.limit) + self.assertEqual(p.limit, pools.get_limit()) + + def test_all_groups(self): + conn = Connection('memory://') + pools.connections[conn] + + 
self.assertTrue(list(pools._all_pools())) + + def test_reset(self): + pools.reset() + + class MyGroup(dict): + clear_called = False + + def clear(self): + self.clear_called = True + + p1 = pools.connections['foo'] = Mock() + g1 = MyGroup() + pools._groups.append(g1) + + pools.reset() + p1.force_close_all.assert_called_with() + self.assertTrue(g1.clear_called) + + p1 = pools.connections['foo'] = Mock() + p1.force_close_all.side_effect = KeyError() + pools.reset() + + def test_set_limit(self): + pools.reset() + pools.set_limit(34576) + limit = pools.get_limit() + self.assertEqual(limit, 34576) + + pools.connections[Connection('memory://')] + pools.set_limit(limit + 1) + self.assertEqual(pools.get_limit(), limit + 1) + limit = pools.get_limit() + with self.assertRaises(RuntimeError): + pools.set_limit(limit - 1) + pools.set_limit(limit - 1, force=True) + self.assertEqual(pools.get_limit(), limit - 1) + + pools.set_limit(pools.get_limit()) + + +class test_fun_PoolGroup(Case): + + def test_connections_behavior(self): + c1u = 'memory://localhost:123' + c2u = 'memory://localhost:124' + c1 = Connection(c1u) + c2 = Connection(c2u) + c3 = Connection(c1u) + + assert eqhash(c1) != eqhash(c2) + assert eqhash(c1) == eqhash(c3) + + c4 = Connection(c1u, transport_options={'confirm_publish': True}) + self.assertNotEqual(eqhash(c3), eqhash(c4)) + + p1 = pools.connections[c1] + p2 = pools.connections[c2] + p3 = pools.connections[c3] + + self.assertIsNot(p1, p2) + self.assertIs(p1, p3) + + r1 = p1.acquire() + self.assertTrue(p1._dirty) + self.assertTrue(p3._dirty) + self.assertFalse(p2._dirty) + r1.release() + self.assertFalse(p1._dirty) + self.assertFalse(p3._dirty) diff --git a/kombu/tests/test_serialization.py b/kombu/tests/test_serialization.py new file mode 100644 index 0000000..2312071 --- /dev/null +++ b/kombu/tests/test_serialization.py @@ -0,0 +1,348 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +from __future__ import absolute_import +from __future__ import unicode_literals + +import sys + +from base64 import b64decode + +from kombu.exceptions import ContentDisallowed, EncodeError, DecodeError +from kombu.five import text_t, bytes_t +from kombu.serialization import ( + registry, register, SerializerNotInstalled, + raw_encode, register_yaml, register_msgpack, + dumps, loads, pickle, pickle_protocol, + unregister, register_pickle, enable_insecure_serializers, + disable_insecure_serializers, +) +from kombu.utils.encoding import str_to_bytes + +from .case import Case, call, mask_modules, patch, skip_if_not_module + +# For content_encoding tests +unicode_string = 'abcdé\u8463' +unicode_string_as_utf8 = unicode_string.encode('utf-8') +latin_string = 'abcdé' +latin_string_as_latin1 = latin_string.encode('latin-1') +latin_string_as_utf8 = latin_string.encode('utf-8') + + +# For serialization tests +py_data = { + 'string': 'The quick brown fox jumps over the lazy dog', + 'int': 10, + 'float': 3.14159265, + 'unicode': 'Thé quick brown fox jumps over thé lazy dog', + 'list': ['george', 'jerry', 'elaine', 'cosmo'], +} + +# JSON serialization tests +json_data = """\ +{"int": 10, "float": 3.1415926500000002, \ +"list": ["george", "jerry", "elaine", "cosmo"], \ +"string": "The quick brown fox jumps over the lazy \ +dog", "unicode": "Th\\u00e9 quick brown fox jumps over \ +th\\u00e9 lazy dog"}\ +""" + +# Pickle serialization tests +pickle_data = pickle.dumps(py_data, protocol=pickle_protocol) + +# YAML serialization tests +yaml_data = """\ +float: 3.1415926500000002 +int: 10 +list: [george, jerry, elaine, cosmo] 
+string: The quick brown fox jumps over the lazy dog +unicode: "Th\\xE9 quick brown fox jumps over th\\xE9 lazy dog" +""" + + +msgpack_py_data = dict(py_data) +# Unicode chars are lost in transmit :( +msgpack_py_data['unicode'] = 'Th quick brown fox jumps over th lazy dog' +msgpack_data = b64decode(str_to_bytes("""\ +haNpbnQKpWZsb2F0y0AJIftTyNTxpGxpc3SUpmdlb3JnZaVqZXJyeaZlbGFpbmWlY29zbW+mc3Rya\ +W5n2gArVGhlIHF1aWNrIGJyb3duIGZveCBqdW1wcyBvdmVyIHRoZSBsYXp5IGRvZ6d1bmljb2Rl2g\ +ApVGggcXVpY2sgYnJvd24gZm94IGp1bXBzIG92ZXIgdGggbGF6eSBkb2c=\ +""")) + + +def say(m): + sys.stderr.write('%s\n' % (m, )) + + +registry.register('testS', lambda s: s, lambda s: 'decoded', + 'application/testS', 'utf-8') + + +class test_Serialization(Case): + + def test_disable(self): + disabled = registry._disabled_content_types + try: + registry.disable('testS') + self.assertIn('application/testS', disabled) + disabled.clear() + + registry.disable('application/testS') + self.assertIn('application/testS', disabled) + finally: + disabled.clear() + + def test_enable(self): + registry._disabled_content_types.add('application/json') + registry.enable('json') + self.assertNotIn('application/json', registry._disabled_content_types) + registry._disabled_content_types.add('application/json') + registry.enable('application/json') + self.assertNotIn('application/json', registry._disabled_content_types) + + def test_loads_when_disabled(self): + disabled = registry._disabled_content_types + try: + registry.disable('testS') + + with self.assertRaises(SerializerNotInstalled): + loads('xxd', 'application/testS', 'utf-8', force=False) + + ret = loads('xxd', 'application/testS', 'utf-8', force=True) + self.assertEqual(ret, 'decoded') + finally: + disabled.clear() + + def test_loads_when_data_is_None(self): + loads(None, 'application/testS', 'utf-8') + + def test_content_type_decoding(self): + self.assertEqual( + unicode_string, + loads(unicode_string_as_utf8, + content_type='plain/text', content_encoding='utf-8'), + ) + self.assertEqual( + latin_string, + loads(latin_string_as_latin1, + content_type='application/data', content_encoding='latin-1'), + ) + + def test_content_type_binary(self): + self.assertIsInstance( + loads(unicode_string_as_utf8, + content_type='application/data', content_encoding='binary'), + bytes_t, + ) + + self.assertEqual( + unicode_string_as_utf8, + loads(unicode_string_as_utf8, + content_type='application/data', content_encoding='binary'), + ) + + def test_content_type_encoding(self): + # Using the 'raw' serializer + self.assertEqual( + unicode_string_as_utf8, + dumps(unicode_string, serializer='raw')[-1], + ) + self.assertEqual( + latin_string_as_utf8, + dumps(latin_string, serializer='raw')[-1], + ) + # And again w/o a specific serializer to check the + # code where we force unicode objects into a string. 
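+        # dumps() returns a (content_type, content_encoding, data)
+        # triple (see test_dumps__no_serializer below), so the [-1]
+        # index picks out the encoded payload, which for text input
+        # should again be the UTF-8 bytes, as with the 'raw' serializer.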
+ self.assertEqual( + unicode_string_as_utf8, + dumps(unicode_string)[-1], + ) + self.assertEqual( + latin_string_as_utf8, + dumps(latin_string)[-1], + ) + + def test_enable_insecure_serializers(self): + with patch('kombu.serialization.registry') as registry: + enable_insecure_serializers() + registry.assert_has_calls([ + call.enable('pickle'), call.enable('yaml'), + call.enable('msgpack'), + ]) + registry.enable.side_effect = KeyError() + enable_insecure_serializers() + + with patch('kombu.serialization.registry') as registry: + enable_insecure_serializers(['msgpack']) + registry.assert_has_calls([call.enable('msgpack')]) + + def test_disable_insecure_serializers(self): + with patch('kombu.serialization.registry') as registry: + registry._decoders = ['pickle', 'yaml', 'doomsday'] + disable_insecure_serializers(allowed=['doomsday']) + registry.disable.assert_has_calls([call('pickle'), call('yaml')]) + registry.enable.assert_has_calls([call('doomsday')]) + disable_insecure_serializers(allowed=None) + registry.disable.assert_has_calls([ + call('pickle'), call('yaml'), call('doomsday') + ]) + + def test_reraises_EncodeError(self): + with self.assertRaises(EncodeError): + dumps([object()], serializer='json') + + def test_reraises_DecodeError(self): + with self.assertRaises(DecodeError): + loads(object(), content_type='application/json', + content_encoding='utf-8') + + def test_json_loads(self): + self.assertEqual( + py_data, + loads(json_data, + content_type='application/json', content_encoding='utf-8'), + ) + + def test_json_dumps(self): + self.assertEqual( + loads( + dumps(py_data, serializer='json')[-1], + content_type='application/json', + content_encoding='utf-8', + ), + loads( + json_data, + content_type='application/json', + content_encoding='utf-8', + ), + ) + + @skip_if_not_module('msgpack', (ImportError, ValueError)) + def test_msgpack_loads(self): + register_msgpack() + res = loads(msgpack_data, + content_type='application/x-msgpack', + content_encoding='binary') + if sys.version_info[0] < 3: + for k, v in res.items(): + if isinstance(v, text_t): + res[k] = v.encode() + if isinstance(v, (list, tuple)): + res[k] = [i.encode() for i in v] + self.assertEqual( + msgpack_py_data, + res, + ) + + @skip_if_not_module('msgpack', (ImportError, ValueError)) + def test_msgpack_dumps(self): + register_msgpack() + self.assertEqual( + loads( + dumps(msgpack_py_data, serializer='msgpack')[-1], + content_type='application/x-msgpack', + content_encoding='binary', + ), + loads( + msgpack_data, + content_type='application/x-msgpack', + content_encoding='binary', + ), + ) + + @skip_if_not_module('yaml') + def test_yaml_loads(self): + register_yaml() + self.assertEqual( + py_data, + loads(yaml_data, + content_type='application/x-yaml', + content_encoding='utf-8'), + ) + + @skip_if_not_module('yaml') + def test_yaml_dumps(self): + register_yaml() + self.assertEqual( + loads( + dumps(py_data, serializer='yaml')[-1], + content_type='application/x-yaml', + content_encoding='utf-8', + ), + loads( + yaml_data, + content_type='application/x-yaml', + content_encoding='utf-8', + ), + ) + + def test_pickle_loads(self): + self.assertEqual( + py_data, + loads(pickle_data, + content_type='application/x-python-serialize', + content_encoding='binary'), + ) + + def test_pickle_dumps(self): + self.assertEqual( + pickle.loads(pickle_data), + pickle.loads(dumps(py_data, serializer='pickle')[-1]), + ) + + def test_register(self): + register(None, None, None, None) + + def test_unregister(self): + with 
self.assertRaises(SerializerNotInstalled): + unregister('nonexisting') + dumps('foo', serializer='pickle') + unregister('pickle') + with self.assertRaises(SerializerNotInstalled): + dumps('foo', serializer='pickle') + register_pickle() + + def test_set_default_serializer_missing(self): + with self.assertRaises(SerializerNotInstalled): + registry._set_default_serializer('nonexisting') + + def test_dumps_missing(self): + with self.assertRaises(SerializerNotInstalled): + dumps('foo', serializer='nonexisting') + + def test_dumps__no_serializer(self): + ctyp, cenc, data = dumps(str_to_bytes('foo')) + self.assertEqual(ctyp, 'application/data') + self.assertEqual(cenc, 'binary') + + def test_loads__trusted_content(self): + loads('tainted', 'application/data', 'binary', accept=[]) + loads('tainted', 'application/text', 'utf-8', accept=[]) + + def test_loads__not_accepted(self): + with self.assertRaises(ContentDisallowed): + loads('tainted', 'application/x-evil', 'binary', accept=[]) + with self.assertRaises(ContentDisallowed): + loads('tainted', 'application/x-evil', 'binary', + accept=['application/x-json']) + self.assertTrue( + loads('tainted', 'application/x-doomsday', 'binary', + accept=['application/x-doomsday']) + ) + + def test_raw_encode(self): + self.assertTupleEqual( + raw_encode('foo'.encode('utf-8')), + ('application/data', 'binary', 'foo'.encode('utf-8')), + ) + + @mask_modules('yaml') + def test_register_yaml__no_yaml(self): + register_yaml() + with self.assertRaises(SerializerNotInstalled): + loads('foo', 'application/x-yaml', 'utf-8') + + @mask_modules('msgpack') + def test_register_msgpack__no_msgpack(self): + register_msgpack() + with self.assertRaises(SerializerNotInstalled): + loads('foo', 'application/x-msgpack', 'utf-8') diff --git a/kombu/tests/test_simple.py b/kombu/tests/test_simple.py new file mode 100644 index 0000000..53a4ac3 --- /dev/null +++ b/kombu/tests/test_simple.py @@ -0,0 +1,136 @@ +from __future__ import absolute_import + +from kombu import Connection, Exchange, Queue + +from .case import Case, Mock + + +class SimpleBase(Case): + abstract = True + + def Queue(self, name, *args, **kwargs): + q = name + if not isinstance(q, Queue): + q = self.__class__.__name__ + if name: + q = '%s.%s' % (q, name) + return self._Queue(q, *args, **kwargs) + + def _Queue(self, *args, **kwargs): + raise NotImplementedError() + + def setUp(self): + if not self.abstract: + self.connection = Connection(transport='memory') + with self.connection.channel() as channel: + channel.exchange_declare('amq.direct') + self.q = self.Queue(None, no_ack=True) + + def tearDown(self): + if not self.abstract: + self.q.close() + self.connection.close() + + def test_produce__consume(self): + if self.abstract: + return + q = self.Queue('test_produce__consume', no_ack=True) + + q.put({'hello': 'Simple'}) + + self.assertEqual(q.get(timeout=1).payload, {'hello': 'Simple'}) + with self.assertRaises(q.Empty): + q.get(timeout=0.1) + + def test_produce__basic_get(self): + if self.abstract: + return + q = self.Queue('test_produce__basic_get', no_ack=True) + q.put({'hello': 'SimpleSync'}) + self.assertEqual(q.get_nowait().payload, {'hello': 'SimpleSync'}) + with self.assertRaises(q.Empty): + q.get_nowait() + + q.put({'hello': 'SimpleSync'}) + self.assertEqual(q.get(block=False).payload, {'hello': 'SimpleSync'}) + with self.assertRaises(q.Empty): + q.get(block=False) + + def test_clear(self): + if self.abstract: + return + q = self.Queue('test_clear', no_ack=True) + + for i in range(10): + q.put({'hello': 
'SimplePurge%d' % (i, )}) + + self.assertEqual(q.clear(), 10) + + def test_enter_exit(self): + if self.abstract: + return + q = self.Queue('test_enter_exit') + q.close = Mock() + + self.assertIs(q.__enter__(), q) + q.__exit__() + q.close.assert_called_with() + + def test_qsize(self): + if self.abstract: + return + q = self.Queue('test_clear', no_ack=True) + + for i in range(10): + q.put({'hello': 'SimplePurge%d' % (i, )}) + + self.assertEqual(q.qsize(), 10) + self.assertEqual(len(q), 10) + + def test_autoclose(self): + if self.abstract: + return + channel = self.connection.channel() + q = self.Queue('test_autoclose', no_ack=True, channel=channel) + q.close() + + def test_custom_Queue(self): + if self.abstract: + return + n = self.__class__.__name__ + exchange = Exchange('%s-test.custom.Queue' % (n, )) + queue = Queue('%s-test.custom.Queue' % (n, ), + exchange, + 'my.routing.key') + + q = self.Queue(queue) + self.assertEqual(q.consumer.queues[0], queue) + q.close() + + def test_bool(self): + if self.abstract: + return + q = self.Queue('test_nonzero') + self.assertTrue(q) + + +class test_SimpleQueue(SimpleBase): + abstract = False + + def _Queue(self, *args, **kwargs): + return self.connection.SimpleQueue(*args, **kwargs) + + def test_is_ack(self): + q = self.Queue('test_is_no_ack') + self.assertFalse(q.no_ack) + + +class test_SimpleBuffer(SimpleBase): + abstract = False + + def Queue(self, *args, **kwargs): + return self.connection.SimpleBuffer(*args, **kwargs) + + def test_is_no_ack(self): + q = self.Queue('test_is_no_ack') + self.assertTrue(q.no_ack) diff --git a/kombu/tests/test_syn.py b/kombu/tests/test_syn.py new file mode 100644 index 0000000..34e5803 --- /dev/null +++ b/kombu/tests/test_syn.py @@ -0,0 +1,61 @@ +from __future__ import absolute_import + +import socket +import sys +import types + +from kombu import syn + +from kombu.tests.case import Case, patch, module_exists + + +class test_syn(Case): + + def test_compat(self): + self.assertEqual(syn.blocking(lambda: 10), 10) + syn.select_blocking_method('foo') + + def test_detect_environment(self): + try: + syn._environment = None + X = syn.detect_environment() + self.assertEqual(syn._environment, X) + Y = syn.detect_environment() + self.assertEqual(Y, X) + finally: + syn._environment = None + + @module_exists('eventlet', 'eventlet.patcher') + def test_detect_environment_eventlet(self): + with patch('eventlet.patcher.is_monkey_patched', create=True) as m: + self.assertTrue(sys.modules['eventlet']) + m.return_value = True + env = syn._detect_environment() + m.assert_called_with(socket) + self.assertEqual(env, 'eventlet') + + @module_exists('gevent') + def test_detect_environment_gevent(self): + with patch('gevent.socket', create=True) as m: + prev, socket.socket = socket.socket, m.socket + try: + self.assertTrue(sys.modules['gevent']) + env = syn._detect_environment() + self.assertEqual(env, 'gevent') + finally: + socket.socket = prev + + def test_detect_environment_no_eventlet_or_gevent(self): + try: + sys.modules['eventlet'] = types.ModuleType('eventlet') + sys.modules['eventlet.patcher'] = types.ModuleType('eventlet') + self.assertEqual(syn._detect_environment(), 'default') + finally: + sys.modules.pop('eventlet', None) + syn._detect_environment() + try: + sys.modules['gevent'] = types.ModuleType('gevent') + self.assertEqual(syn._detect_environment(), 'default') + finally: + sys.modules.pop('gevent', None) + syn._detect_environment() diff --git a/kombu/tests/transport/__init__.py b/kombu/tests/transport/__init__.py new file mode 
100644 index 0000000..e69de29 diff --git a/kombu/tests/transport/test_SQS.py b/kombu/tests/transport/test_SQS.py new file mode 100644 index 0000000..e4efb53 --- /dev/null +++ b/kombu/tests/transport/test_SQS.py @@ -0,0 +1,296 @@ +"""Testing module for the kombu.transport.SQS package. + +NOTE: The SQSQueueMock and SQSConnectionMock classes originally come from +http://github.com/pcsforeducation/sqs-mock-python. They have been patched +slightly. +""" + +from __future__ import absolute_import + +from kombu import Connection +from kombu import messaging +from kombu import five +from kombu.tests.case import Case, SkipTest +import kombu + +try: + from kombu.transport import SQS +except ImportError: + # Boto must not be installed if the SQS transport fails to import, + # so we skip all unit tests. Set SQS to None here, and it will be + # checked during the setUp() phase later. + SQS = None + + +class SQSQueueMock(object): + + def __init__(self, name): + self.name = name + self.messages = [] + self._get_message_calls = 0 + + def clear(self, page_size=10, vtimeout=10): + empty, self.messages[:] = not self.messages, [] + return not empty + + def count(self, page_size=10, vtimeout=10): + return len(self.messages) + count_slow = count + + def delete(self): + self.messages[:] = [] + return True + + def delete_message(self, message): + try: + self.messages.remove(message) + except ValueError: + return False + return True + + def get_messages(self, num_messages=1, visibility_timeout=None, + attributes=None, *args, **kwargs): + self._get_message_calls += 1 + return self.messages[:num_messages] + + def read(self, visibility_timeout=None): + return self.messages.pop(0) + + def write(self, message): + self.messages.append(message) + return True + + +class SQSConnectionMock(object): + + def __init__(self): + self.queues = {} + + def get_queue(self, queue): + return self.queues.get(queue) + + def get_all_queues(self, prefix=""): + return self.queues.values() + + def delete_queue(self, queue, force_deletion=False): + q = self.get_queue(queue) + if q: + if q.count(): + return False + q.clear() + self.queues.pop(queue, None) + + def delete_message(self, queue, message): + return queue.delete_message(message) + + def create_queue(self, name, *args, **kwargs): + q = self.queues[name] = SQSQueueMock(name) + return q + + +class test_Channel(Case): + + def handleMessageCallback(self, message): + self.callback_message = message + + def setUp(self): + """Mock the back-end SQS classes""" + # Sanity check... if SQS is None, then it did not import and we + # cannot execute our tests. + if SQS is None: + raise SkipTest('Boto is not installed') + + SQS.Channel._queue_cache.clear() + + # Common variables used in the unit tests + self.queue_name = 'unittest' + + # Mock the sqs() method that returns an SQSConnection object and + # instead return an SQSConnectionMock() object. + self.sqs_conn_mock = SQSConnectionMock() + + def mock_sqs(): + return self.sqs_conn_mock + SQS.Channel.sqs = mock_sqs() + + # Set up a task exchange for passing tasks through the queue + self.exchange = kombu.Exchange('test_SQS', type='direct') + self.queue = kombu.Queue(self.queue_name, + self.exchange, + self.queue_name) + + # Mock up a test SQS Queue with the SQSQueueMock class (and always + # make sure its a clean empty queue) + self.sqs_queue_mock = SQSQueueMock(self.queue_name) + + # Now, create our Connection object with the SQS Transport and store + # the connection/channel objects as references for use in these tests. 
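+        # Note that because Channel.sqs was replaced with the mock
+        # connection above, creating this Connection performs no network
+        # I/O: every queue operation in these tests is served by the
+        # in-memory mock classes.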
+        self.connection = Connection(transport=SQS.Transport)
+        self.channel = self.connection.channel()
+
+        self.queue(self.channel).declare()
+        self.producer = messaging.Producer(self.channel,
+                                           self.exchange,
+                                           routing_key=self.queue_name)
+
+        # Lastly, make sure that we're set up to 'consume' this queue.
+        self.channel.basic_consume(self.queue_name,
+                                   no_ack=True,
+                                   callback=self.handleMessageCallback,
+                                   consumer_tag='unittest')
+
+    def test_init(self):
+        """kombu.SQS.Channel instantiates correctly with mocked queues"""
+        self.assertIn(self.queue_name, self.channel._queue_cache)
+
+    def test_new_queue(self):
+        queue_name = 'new_unittest_queue'
+        self.channel._new_queue(queue_name)
+        self.assertIn(queue_name, self.sqs_conn_mock.queues)
+        # For cleanup purposes, delete the queue and the queue file
+        self.channel._delete(queue_name)
+
+    def test_delete(self):
+        queue_name = 'new_unittest_queue'
+        self.channel._new_queue(queue_name)
+        self.channel._delete(queue_name)
+        self.assertNotIn(queue_name, self.channel._queue_cache)
+
+    def test_get_from_sqs(self):
+        # Test getting a single message
+        message = 'my test message'
+        self.producer.publish(message)
+        results = self.channel._get_from_sqs(self.queue_name)
+        self.assertEqual(len(results), 1)
+
+        # Now test getting many messages
+        for i in range(3):
+            message = 'message: {0}'.format(i)
+            self.producer.publish(message)
+
+        results = self.channel._get_from_sqs(self.queue_name, count=3)
+        self.assertEqual(len(results), 3)
+
+    def test_get_with_empty_list(self):
+        with self.assertRaises(five.Empty):
+            self.channel._get(self.queue_name)
+
+    def test_get_bulk_raises_empty(self):
+        with self.assertRaises(five.Empty):
+            self.channel._get_bulk(self.queue_name)
+
+    def test_messages_to_python(self):
+        message_count = 3
+        # Create several test messages and publish them
+        for i in range(message_count):
+            message = 'message: %s' % i
+            self.producer.publish(message)
+
+        # Get the messages now
+        messages = self.channel._get_from_sqs(
+            self.queue_name, count=message_count,
+        )
+
+        # Now convert them to payloads
+        payloads = self.channel._messages_to_python(
+            messages, self.queue_name,
+        )
+
+        # We got the same number of payloads back, right?
+        self.assertEqual(len(payloads), message_count)
+
+        # Make sure they're payload-style objects
+        for p in payloads:
+            self.assertIn('properties', p)
+
+    def test_put_and_get(self):
+        message = 'my test message'
+        self.producer.publish(message)
+        results = self.queue(self.channel).get().payload
+        self.assertEqual(message, results)
+
+    def test_puts_and_gets(self):
+        for i in range(3):
+            message = 'message: %s' % i
+            self.producer.publish(message)
+
+        for i in range(3):
+            self.assertEqual('message: %s' % i,
+                             self.queue(self.channel).get().payload)
+
+    def test_put_and_get_bulk(self):
+        # With QoS.prefetch_count = 0
+        message = 'my test message'
+        self.producer.publish(message)
+        results = self.channel._get_bulk(self.queue_name)
+        self.assertEqual(1, len(results))
+
+    def test_puts_and_get_bulk(self):
+        # Generate 8 messages
+        message_count = 8
+
+        # Set the prefetch_count to 5
+        self.channel.qos.prefetch_count = 5
+
+        # Now, generate all the messages
+        for i in range(message_count):
+            message = 'message: %s' % i
+            self.producer.publish(message)
+
+        # Count how many messages are retrieved the first time. Should
+        # be 5 (the prefetch_count).
+        results = self.channel._get_bulk(self.queue_name)
+        self.assertEqual(5, len(results))
+
+        # Now, do the get again, the number of messages returned should be 3.
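+        # (8 messages were published and the first _get_bulk consumed 5,
+        # so 8 - 5 = 3 messages should remain for this second call.)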
+        results = self.channel._get_bulk(self.queue_name)
+        self.assertEqual(3, len(results))
+
+    def test_drain_events_with_empty_list(self):
+        def mock_can_consume():
+            return False
+        self.channel.qos.can_consume = mock_can_consume
+        with self.assertRaises(five.Empty):
+            self.channel.drain_events()
+
+    def test_drain_events_with_prefetch_5(self):
+        # Generate 20 messages
+        message_count = 20
+        expected_get_message_count = 4
+
+        # Set the prefetch_count to 5
+        self.channel.qos.prefetch_count = 5
+
+        # Now, generate all the messages
+        for i in range(message_count):
+            self.producer.publish('message: %s' % i)
+
+        # Now drain all the events
+        for i in range(message_count):
+            self.channel.drain_events()
+
+        # How many times was the SQSConnectionMock get_message method called?
+        self.assertEqual(
+            expected_get_message_count,
+            self.channel._queue_cache[self.queue_name]._get_message_calls)
+
+    def test_drain_events_with_prefetch_none(self):
+        # Generate 20 messages
+        message_count = 20
+        expected_get_message_count = 2
+
+        # Set the prefetch_count to None
+        self.channel.qos.prefetch_count = None
+
+        # Now, generate all the messages
+        for i in range(message_count):
+            self.producer.publish('message: %s' % i)
+
+        # Now drain all the events
+        for i in range(message_count):
+            self.channel.drain_events()
+
+        # How many times was the SQSConnectionMock get_message method called?
+        self.assertEqual(
+            expected_get_message_count,
+            self.channel._queue_cache[self.queue_name]._get_message_calls)
diff --git a/kombu/tests/transport/test_amqplib.py b/kombu/tests/transport/test_amqplib.py
new file mode 100644
index 0000000..cf7d615
--- /dev/null
+++ b/kombu/tests/transport/test_amqplib.py
@@ -0,0 +1,162 @@
+from __future__ import absolute_import
+
+import sys
+
+from kombu import Connection
+
+from kombu.tests.case import Case, SkipTest, Mock, mask_modules
+
+
+class MockConnection(dict):
+
+    def __setattr__(self, key, value):
+        self[key] = value
+
+try:
+    __import__('amqplib')
+except ImportError:
+    amqplib = Channel = None
+else:
+    from kombu.transport import amqplib
+
+    class Channel(amqplib.Channel):
+        wait_returns = []
+
+        def _x_open(self, *args, **kwargs):
+            pass
+
+        def wait(self, *args, **kwargs):
+            return self.wait_returns
+
+        def _send_method(self, *args, **kwargs):
+            pass
+
+
+class amqplibCase(Case):
+
+    def setUp(self):
+        if amqplib is None:
+            raise SkipTest('amqplib not installed')
+        self.setup()
+
+    def setup(self):
+        pass
+
+
+class test_Channel(amqplibCase):
+
+    def setup(self):
+        self.conn = Mock()
+        self.conn.channels = {}
+        self.channel = Channel(self.conn, 0)
+
+    def test_init(self):
+        self.assertFalse(self.channel.no_ack_consumers)
+
+    def test_prepare_message(self):
+        self.assertTrue(self.channel.prepare_message(
+            'foobar', 10, 'application/data', 'utf-8',
+            properties={},
+        ))
+
+    def test_message_to_python(self):
+        message = Mock()
+        message.headers = {}
+        message.properties = {}
+        self.assertTrue(self.channel.message_to_python(message))
+
+    def test_close_resolves_connection_cycle(self):
+        self.assertIsNotNone(self.channel.connection)
+        self.channel.close()
+        self.assertIsNone(self.channel.connection)
+
+    def test_basic_consume_registers_ack_status(self):
+        self.channel.wait_returns = 'my-consumer-tag'
+        self.channel.basic_consume('foo', no_ack=True)
+        self.assertIn('my-consumer-tag', self.channel.no_ack_consumers)
+
+        self.channel.wait_returns = 'other-consumer-tag'
+        self.channel.basic_consume('bar', no_ack=False)
+        self.assertNotIn('other-consumer-tag',
self.channel.no_ack_consumers) + + self.channel.basic_cancel('my-consumer-tag') + self.assertNotIn('my-consumer-tag', self.channel.no_ack_consumers) + + +class test_Transport(amqplibCase): + + def setup(self): + self.connection = Connection('amqplib://') + self.transport = self.connection.transport + + def test_create_channel(self): + connection = Mock() + self.transport.create_channel(connection) + connection.channel.assert_called_with() + + def test_drain_events(self): + connection = Mock() + self.transport.drain_events(connection, timeout=10.0) + connection.drain_events.assert_called_with(timeout=10.0) + + def test_dnspython_localhost_resolve_bug(self): + + class Conn(object): + + def __init__(self, **kwargs): + vars(self).update(kwargs) + + self.transport.Connection = Conn + self.transport.client.hostname = 'localhost' + conn1 = self.transport.establish_connection() + self.assertEqual(conn1.host, '127.0.0.1:5672') + + self.transport.client.hostname = 'example.com' + conn2 = self.transport.establish_connection() + self.assertEqual(conn2.host, 'example.com:5672') + + def test_close_connection(self): + connection = Mock() + connection.client = Mock() + self.transport.close_connection(connection) + + self.assertIsNone(connection.client) + connection.close.assert_called_with() + + def test_verify_connection(self): + connection = Mock() + connection.channels = None + self.assertFalse(self.transport.verify_connection(connection)) + + connection.channels = {1: 1, 2: 2} + self.assertTrue(self.transport.verify_connection(connection)) + + @mask_modules('ssl') + def test_import_no_ssl(self): + pm = sys.modules.pop('kombu.transport.amqplib') + try: + from kombu.transport.amqplib import SSLError + self.assertEqual(SSLError.__module__, 'kombu.transport.amqplib') + finally: + if pm is not None: + sys.modules['kombu.transport.amqplib'] = pm + + +class test_amqplib(amqplibCase): + + def test_default_port(self): + + class Transport(amqplib.Transport): + Connection = MockConnection + + c = Connection(port=None, transport=Transport).connect() + self.assertEqual(c['host'], + '127.0.0.1:%s' % (Transport.default_port, )) + + def test_custom_port(self): + + class Transport(amqplib.Transport): + Connection = MockConnection + + c = Connection(port=1337, transport=Transport).connect() + self.assertEqual(c['host'], '127.0.0.1:1337') diff --git a/kombu/tests/transport/test_base.py b/kombu/tests/transport/test_base.py new file mode 100644 index 0000000..5c4a50d --- /dev/null +++ b/kombu/tests/transport/test_base.py @@ -0,0 +1,148 @@ +from __future__ import absolute_import + +from kombu import Connection, Consumer, Exchange, Producer, Queue +from kombu.five import text_t +from kombu.message import Message +from kombu.transport.base import StdChannel, Transport, Management + +from kombu.tests.case import Case, Mock + + +class test_StdChannel(Case): + + def setUp(self): + self.conn = Connection('memory://') + self.channel = self.conn.channel() + self.channel.queues.clear() + self.conn.connection.state.clear() + + def test_Consumer(self): + q = Queue('foo', Exchange('foo')) + print(self.channel.queues) + cons = self.channel.Consumer(q) + self.assertIsInstance(cons, Consumer) + self.assertIs(cons.channel, self.channel) + + def test_Producer(self): + prod = self.channel.Producer() + self.assertIsInstance(prod, Producer) + self.assertIs(prod.channel, self.channel) + + def test_interface_get_bindings(self): + with self.assertRaises(NotImplementedError): + StdChannel().get_bindings() + + def 
test_interface_after_reply_message_received(self): + self.assertIsNone( + StdChannel().after_reply_message_received(Queue('foo')), + ) + + +class test_Message(Case): + + def setUp(self): + self.conn = Connection('memory://') + self.channel = self.conn.channel() + self.message = Message(self.channel, delivery_tag=313) + + def test_postencode(self): + m = Message(self.channel, text_t('FOO'), postencode='ccyzz') + with self.assertRaises(LookupError): + m._reraise_error() + m.ack() + + def test_ack_respects_no_ack_consumers(self): + self.channel.no_ack_consumers = set(['abc']) + self.message.delivery_info['consumer_tag'] = 'abc' + ack = self.channel.basic_ack = Mock() + + self.message.ack() + self.assertNotEqual(self.message._state, 'ACK') + self.assertFalse(ack.called) + + def test_ack_missing_consumer_tag(self): + self.channel.no_ack_consumers = set(['abc']) + self.message.delivery_info = {} + ack = self.channel.basic_ack = Mock() + + self.message.ack() + ack.assert_called_with(self.message.delivery_tag) + + def test_ack_not_no_ack(self): + self.channel.no_ack_consumers = set() + self.message.delivery_info['consumer_tag'] = 'abc' + ack = self.channel.basic_ack = Mock() + + self.message.ack() + ack.assert_called_with(self.message.delivery_tag) + + def test_ack_log_error_when_no_error(self): + ack = self.message.ack = Mock() + self.message.ack_log_error(Mock(), KeyError) + ack.assert_called_with() + + def test_ack_log_error_when_error(self): + ack = self.message.ack = Mock() + ack.side_effect = KeyError('foo') + logger = Mock() + self.message.ack_log_error(logger, KeyError) + ack.assert_called_with() + self.assertTrue(logger.critical.called) + self.assertIn("Couldn't ack", logger.critical.call_args[0][0]) + + def test_reject_log_error_when_no_error(self): + reject = self.message.reject = Mock() + self.message.reject_log_error(Mock(), KeyError, requeue=True) + reject.assert_called_with(requeue=True) + + def test_reject_log_error_when_error(self): + reject = self.message.reject = Mock() + reject.side_effect = KeyError('foo') + logger = Mock() + self.message.reject_log_error(logger, KeyError) + reject.assert_called_with(requeue=False) + self.assertTrue(logger.critical.called) + self.assertIn("Couldn't reject", logger.critical.call_args[0][0]) + + +class test_interface(Case): + + def test_establish_connection(self): + with self.assertRaises(NotImplementedError): + Transport(None).establish_connection() + + def test_close_connection(self): + with self.assertRaises(NotImplementedError): + Transport(None).close_connection(None) + + def test_create_channel(self): + with self.assertRaises(NotImplementedError): + Transport(None).create_channel(None) + + def test_close_channel(self): + with self.assertRaises(NotImplementedError): + Transport(None).close_channel(None) + + def test_drain_events(self): + with self.assertRaises(NotImplementedError): + Transport(None).drain_events(None) + + def test_heartbeat_check(self): + Transport(None).heartbeat_check(Mock(name='connection')) + + def test_driver_version(self): + self.assertTrue(Transport(None).driver_version()) + + def test_register_with_event_loop(self): + Transport(None).register_with_event_loop(Mock(name='loop')) + + def test_manager(self): + self.assertTrue(Transport(None).manager) + + +class test_Management(Case): + + def test_get_bindings(self): + m = Management(Mock(name='transport')) + with self.assertRaises(NotImplementedError): + m.get_bindings() diff --git a/kombu/tests/transport/test_filesystem.py b/kombu/tests/transport/test_filesystem.py new 
file mode 100644 index 0000000..0649a8d --- /dev/null +++ b/kombu/tests/transport/test_filesystem.py @@ -0,0 +1,123 @@ +from __future__ import absolute_import + +import sys +import tempfile + +from kombu import Connection, Exchange, Queue, Consumer, Producer + +from kombu.tests.case import Case, SkipTest + + +class test_FilesystemTransport(Case): + + def setUp(self): + if sys.platform == 'win32': + raise SkipTest('Needs win32con module') + try: + data_folder_in = tempfile.mkdtemp() + data_folder_out = tempfile.mkdtemp() + except Exception: + raise SkipTest('filesystem transport: cannot create tempfiles') + self.c = Connection(transport='filesystem', + transport_options={ + 'data_folder_in': data_folder_in, + 'data_folder_out': data_folder_out, + }) + self.p = Connection(transport='filesystem', + transport_options={ + 'data_folder_in': data_folder_out, + 'data_folder_out': data_folder_in, + }) + self.e = Exchange('test_transport_filesystem') + self.q = Queue('test_transport_filesystem', + exchange=self.e, + routing_key='test_transport_filesystem') + self.q2 = Queue('test_transport_filesystem2', + exchange=self.e, + routing_key='test_transport_filesystem2') + + def test_produce_consume_noack(self): + producer = Producer(self.p.channel(), self.e) + consumer = Consumer(self.c.channel(), self.q, no_ack=True) + + for i in range(10): + producer.publish({'foo': i}, + routing_key='test_transport_filesystem') + + _received = [] + + def callback(message_data, message): + _received.append(message) + + consumer.register_callback(callback) + consumer.consume() + + while 1: + if len(_received) == 10: + break + self.c.drain_events() + + self.assertEqual(len(_received), 10) + + def test_produce_consume(self): + producer_channel = self.p.channel() + consumer_channel = self.c.channel() + producer = Producer(producer_channel, self.e) + consumer1 = Consumer(consumer_channel, self.q) + consumer2 = Consumer(consumer_channel, self.q2) + self.q2(consumer_channel).declare() + + for i in range(10): + producer.publish({'foo': i}, + routing_key='test_transport_filesystem') + for i in range(10): + producer.publish({'foo': i}, + routing_key='test_transport_filesystem2') + + _received1 = [] + _received2 = [] + + def callback1(message_data, message): + _received1.append(message) + message.ack() + + def callback2(message_data, message): + _received2.append(message) + message.ack() + + consumer1.register_callback(callback1) + consumer2.register_callback(callback2) + + consumer1.consume() + consumer2.consume() + + while 1: + if len(_received1) + len(_received2) == 20: + break + self.c.drain_events() + + self.assertEqual(len(_received1) + len(_received2), 20) + + # compression + producer.publish({'compressed': True}, + routing_key='test_transport_filesystem', + compression='zlib') + m = self.q(consumer_channel).get() + self.assertDictEqual(m.payload, {'compressed': True}) + + # queue.delete + for i in range(10): + producer.publish({'foo': i}, + routing_key='test_transport_filesystem') + self.assertTrue(self.q(consumer_channel).get()) + self.q(consumer_channel).delete() + self.q(consumer_channel).declare() + self.assertIsNone(self.q(consumer_channel).get()) + + # queue.purge + for i in range(10): + producer.publish({'foo': i}, + routing_key='test_transport_filesystem2') + self.assertTrue(self.q2(consumer_channel).get()) + self.q2(consumer_channel).purge() + self.assertIsNone(self.q2(consumer_channel).get()) diff --git a/kombu/tests/transport/test_librabbitmq.py b/kombu/tests/transport/test_librabbitmq.py new file mode 100644 
index 0000000..a50b262 --- /dev/null +++ b/kombu/tests/transport/test_librabbitmq.py @@ -0,0 +1,150 @@ +from __future__ import absolute_import + +try: + import librabbitmq +except ImportError: + librabbitmq = None # noqa +else: + from kombu.transport import librabbitmq # noqa + +from kombu.tests.case import Case, Mock, SkipTest, patch + + +class lrmqCase(Case): + + def setUp(self): + if librabbitmq is None: + raise SkipTest('librabbitmq is not installed') + + +class test_Message(lrmqCase): + + def test_init(self): + chan = Mock(name='channel') + message = librabbitmq.Message( + chan, {'prop': 42}, {'delivery_tag': 337}, 'body', + ) + self.assertEqual(message.body, 'body') + self.assertEqual(message.delivery_tag, 337) + self.assertEqual(message.properties['prop'], 42) + + +class test_Channel(lrmqCase): + + def test_prepare_message(self): + conn = Mock(name='connection') + chan = librabbitmq.Channel(conn, 1) + self.assertTrue(chan) + + body = 'the quick brown fox...' + properties = {'name': 'Elaine M.'} + + body2, props2 = chan.prepare_message( + body, properties=properties, + priority=999, + content_type='ctype', + content_encoding='cenc', + headers={'H': 2}, + ) + + self.assertEqual(props2['name'], 'Elaine M.') + self.assertEqual(props2['priority'], 999) + self.assertEqual(props2['content_type'], 'ctype') + self.assertEqual(props2['content_encoding'], 'cenc') + self.assertEqual(props2['headers'], {'H': 2}) + self.assertEqual(body2, body) + + body3, props3 = chan.prepare_message(body, priority=777) + self.assertEqual(props3['priority'], 777) + self.assertEqual(body3, body) + + +class test_Transport(lrmqCase): + + def setUp(self): + super(test_Transport, self).setUp() + self.client = Mock(name='client') + self.T = librabbitmq.Transport(self.client) + + def test_driver_version(self): + self.assertTrue(self.T.driver_version()) + + def test_create_channel(self): + conn = Mock(name='connection') + chan = self.T.create_channel(conn) + self.assertTrue(chan) + conn.channel.assert_called_with() + + def test_drain_events(self): + conn = Mock(name='connection') + self.T.drain_events(conn, timeout=1.33) + conn.drain_events.assert_called_with(timeout=1.33) + + def test_establish_connection_SSL_not_supported(self): + self.client.ssl = True + with self.assertRaises(NotImplementedError): + self.T.establish_connection() + + def test_establish_connection(self): + self.T.Connection = Mock(name='Connection') + self.T.client.ssl = False + self.T.client.port = None + self.T.client.transport_options = {} + + conn = self.T.establish_connection() + self.assertEqual( + self.T.client.port, + self.T.default_connection_params['port'], + ) + self.assertEqual(conn.client, self.T.client) + self.assertEqual(self.T.client.drain_events, conn.drain_events) + + def test_collect__no_conn(self): + self.T.client.drain_events = 1234 + self.T._collect(None) + self.assertIsNone(self.client.drain_events) + self.assertIsNone(self.T.client) + + def test_collect__with_conn(self): + self.T.client.drain_events = 1234 + conn = Mock(name='connection') + chans = conn.channels = {1: Mock(name='chan1'), 2: Mock(name='chan2')} + conn.callbacks = {'foo': Mock(name='cb1'), 'bar': Mock(name='cb2')} + for i, chan in enumerate(conn.channels.values()): + chan.connection = i + + with patch('os.close') as close: + self.T._collect(conn) + close.assert_called_with(conn.fileno()) + self.assertFalse(conn.channels) + self.assertFalse(conn.callbacks) + for chan in chans.values(): + self.assertIsNone(chan.connection) + 
self.assertIsNone(self.client.drain_events) + self.assertIsNone(self.T.client) + + with patch('os.close') as close: + self.T.client = self.client + close.side_effect = OSError() + self.T._collect(conn) + close.assert_called_with(conn.fileno()) + + def test_register_with_event_loop(self): + conn = Mock(name='conn') + loop = Mock(name='loop') + self.T.register_with_event_loop(conn, loop) + loop.add_reader.assert_called_with( + conn.fileno(), self.T.on_readable, conn, loop, + ) + + def test_verify_connection(self): + conn = Mock(name='connection') + conn.connected = True + self.assertTrue(self.T.verify_connection(conn)) + + def test_close_connection(self): + conn = Mock(name='connection') + self.client.drain_events = 1234 + self.T.close_connection(conn) + self.assertIsNone(self.client.drain_events) + conn.close.assert_called_with() diff --git a/kombu/tests/transport/test_memory.py b/kombu/tests/transport/test_memory.py new file mode 100644 index 0000000..605527f --- /dev/null +++ b/kombu/tests/transport/test_memory.py @@ -0,0 +1,157 @@ +from __future__ import absolute_import + +import socket + +from kombu import Connection, Exchange, Queue, Consumer, Producer + +from kombu.tests.case import Case + + +class test_MemoryTransport(Case): + + def setUp(self): + self.c = Connection(transport='memory') + self.e = Exchange('test_transport_memory') + self.q = Queue('test_transport_memory', + exchange=self.e, + routing_key='test_transport_memory') + self.q2 = Queue('test_transport_memory2', + exchange=self.e, + routing_key='test_transport_memory2') + self.fanout = Exchange('test_transport_memory_fanout', type='fanout') + self.q3 = Queue('test_transport_memory_fanout1', + exchange=self.fanout) + self.q4 = Queue('test_transport_memory_fanout2', + exchange=self.fanout) + + def test_driver_version(self): + self.assertTrue(self.c.transport.driver_version()) + + def test_produce_consume_noack(self): + channel = self.c.channel() + producer = Producer(channel, self.e) + consumer = Consumer(channel, self.q, no_ack=True) + + for i in range(10): + producer.publish({'foo': i}, routing_key='test_transport_memory') + + _received = [] + + def callback(message_data, message): + _received.append(message) + + consumer.register_callback(callback) + consumer.consume() + + while 1: + if len(_received) == 10: + break + self.c.drain_events() + + self.assertEqual(len(_received), 10) + + def test_produce_consume_fanout(self): + producer = self.c.Producer() + consumer = self.c.Consumer([self.q3, self.q4]) + + producer.publish( + {'hello': 'world'}, + declare=consumer.queues, + exchange=self.fanout, + ) + + self.assertEqual(self.q3(self.c).get().payload, {'hello': 'world'}) + self.assertEqual(self.q4(self.c).get().payload, {'hello': 'world'}) + self.assertIsNone(self.q3(self.c).get()) + self.assertIsNone(self.q4(self.c).get()) + + def test_produce_consume(self): + channel = self.c.channel() + producer = Producer(channel, self.e) + consumer1 = Consumer(channel, self.q) + consumer2 = Consumer(channel, self.q2) + self.q2(channel).declare() + + for i in range(10): + producer.publish({'foo': i}, routing_key='test_transport_memory') + for i in range(10): + producer.publish({'foo': i}, routing_key='test_transport_memory2') + + _received1 = [] + _received2 = [] + + def callback1(message_data, message): + _received1.append(message) + message.ack() + + def callback2(message_data, message): + _received2.append(message) + message.ack() + + consumer1.register_callback(callback1) + consumer2.register_callback(callback2) + + 
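+        # drain until all 20 messages (10 per queue) have arrived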
consumer1.consume() + consumer2.consume() + + while 1: + if len(_received1) + len(_received2) == 20: + break + self.c.drain_events() + + self.assertEqual(len(_received1) + len(_received2), 20) + + # compression + producer.publish({'compressed': True}, + routing_key='test_transport_memory', + compression='zlib') + m = self.q(channel).get() + self.assertDictEqual(m.payload, {'compressed': True}) + + # queue.delete + for i in range(10): + producer.publish({'foo': i}, routing_key='test_transport_memory') + self.assertTrue(self.q(channel).get()) + self.q(channel).delete() + self.q(channel).declare() + self.assertIsNone(self.q(channel).get()) + + # queue.purge + for i in range(10): + producer.publish({'foo': i}, routing_key='test_transport_memory2') + self.assertTrue(self.q2(channel).get()) + self.q2(channel).purge() + self.assertIsNone(self.q2(channel).get()) + + def test_drain_events(self): + with self.assertRaises(socket.timeout): + self.c.drain_events(timeout=0.1) + + c1 = self.c.channel() + c2 = self.c.channel() + + with self.assertRaises(socket.timeout): + self.c.drain_events(timeout=0.1) + + del(c1) # so pyflakes doesn't complain. + del(c2) + + def test_drain_events_unregistered_queue(self): + c1 = self.c.channel() + + class Cycle(object): + + def get(self, timeout=None): + return ('foo', 'foo'), c1 + + self.c.transport.cycle = Cycle() + with self.assertRaises(KeyError): + self.c.drain_events() + + def test_queue_for(self): + chan = self.c.channel() + chan.queues.clear() + + x = chan._queue_for('foo') + self.assertTrue(x) + self.assertIs(chan._queue_for('foo'), x) diff --git a/kombu/tests/transport/test_mongodb.py b/kombu/tests/transport/test_mongodb.py new file mode 100644 index 0000000..b4d10fc --- /dev/null +++ b/kombu/tests/transport/test_mongodb.py @@ -0,0 +1,120 @@ +from __future__ import absolute_import + +from kombu import Connection + +from kombu.tests.case import Case, SkipTest, Mock, skip_if_not_module + + +class MockConnection(dict): + + def __setattr__(self, key, value): + self[key] = value + + +class test_mongodb(Case): + + def _get_connection(self, url, **kwargs): + from kombu.transport import mongodb + + class _Channel(mongodb.Channel): + + def _create_client(self): + self._client = Mock(name='client') + + class Transport(mongodb.Transport): + Connection = MockConnection + Channel = _Channel + + return Connection(url, transport=Transport, **kwargs).connect() + + @skip_if_not_module('pymongo') + def test_defaults(self): + url = 'mongodb://' + + c = self._get_connection(url) + hostname, dbname, options = c.channels[0]._parse_uri() + + self.assertEquals(dbname, 'kombu_default') + self.assertEquals(hostname, 'mongodb://127.0.0.1') + + @skip_if_not_module('pymongo') + def test_custom_host(self): + url = 'mongodb://localhost' + c = self._get_connection(url) + hostname, dbname, options = c.channels[0]._parse_uri() + + self.assertEquals(dbname, 'kombu_default') + + @skip_if_not_module('pymongo') + def test_custom_database(self): + url = 'mongodb://localhost/dbname' + c = self._get_connection(url) + hostname, dbname, options = c.channels[0]._parse_uri() + + self.assertEquals(dbname, 'dbname') + + @skip_if_not_module('pymongo') + def test_custom_credentials(self): + url = 'mongodb://localhost/dbname' + c = self._get_connection(url, userid='foo', password='bar') + hostname, dbname, options = c.channels[0]._parse_uri() + + self.assertEquals(hostname, 'mongodb://foo:bar@localhost/dbname') + self.assertEquals(dbname, 'dbname') + + @skip_if_not_module('pymongo') + def test_options(self): 
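+        # URI query options (e.g. ?safe=true) must survive _parse_uri()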
+        url = 'mongodb://localhost,localhost2:29017/dbname?safe=true'
+        c = self._get_connection(url)
+
+        hostname, dbname, options = c.channels[0]._parse_uri()
+
+        self.assertEqual(options['safe'], True)
+
+    @skip_if_not_module('pymongo')
+    def test_real_connections(self):
+        from pymongo.errors import ConfigurationError
+
+        raise SkipTest(
+            'Test is functional: it actually connects to mongod')
+
+        url = 'mongodb://localhost,localhost:29017/dbname'
+        c = self._get_connection(url)
+        client = c.channels[0].client
+
+        nodes = client.connection.nodes
+        # If there's just 1 node it is because we're connecting to a single
+        # server instead of a replica set / mongos.
+        if len(nodes) == 2:
+            self.assertTrue(('localhost', 29017) in nodes)
+        self.assertEquals(client.name, 'dbname')
+
+        url = 'mongodb://localhost:27017,localhost2:29017/dbname'
+        c = self._get_connection(url)
+        client = c.channels[0].client
+
+        # Login to admin db since there's no db specified
+        url = 'mongodb://adminusername:adminpassword@localhost'
+        c = self._get_connection(url)
+        client = c.channels[0].client
+        self.assertEquals(client.name, 'kombu_default')
+
+        # Let's make sure that using admin db doesn't break anything
+        # when no user is specified
+        url = 'mongodb://localhost'
+        c = self._get_connection(url)
+        client = c.channels[0].client
+
+        # Assuming there's user 'username' with password 'password'
+        # configured in mongodb
+        url = 'mongodb://username:password@localhost/dbname'
+        c = self._get_connection(url)
+        client = c.channels[0].client
+
+        # Assuming there's no user 'nousername' with password 'nopassword'
+        # configured in mongodb
+        url = 'mongodb://nousername:nopassword@localhost/dbname'
+        c = self._get_connection(url)
+
+        with self.assertRaises(ConfigurationError):
+            c.channels[0].client
diff --git a/kombu/tests/transport/test_pyamqp.py b/kombu/tests/transport/test_pyamqp.py
new file mode 100644
index 0000000..d6a910b
--- /dev/null
+++ b/kombu/tests/transport/test_pyamqp.py
@@ -0,0 +1,179 @@
+from __future__ import absolute_import
+
+import sys
+
+from itertools import count
+
+try:
+    import amqp  # noqa
+except ImportError:
+    pyamqp = None  # noqa
+else:
+    from kombu.transport import pyamqp
+from kombu import Connection
+from kombu.five import nextfun
+
+from kombu.tests.case import Case, Mock, SkipTest, mask_modules, patch
+
+
+class MockConnection(dict):
+
+    def __setattr__(self, key, value):
+        self[key] = value
+
+
+class test_Channel(Case):
+
+    def setUp(self):
+        if pyamqp is None:
+            raise SkipTest('py-amqp not installed')
+
+        class Channel(pyamqp.Channel):
+            wait_returns = []
+
+            def _x_open(self, *args, **kwargs):
+                pass
+
+            def wait(self, *args, **kwargs):
+                return self.wait_returns
+
+            def _send_method(self, *args, **kwargs):
+                pass
+
+        self.conn = Mock()
+        self.conn._get_free_channel_id.side_effect = nextfun(count(0))
+        self.conn.channels = {}
+        self.channel = Channel(self.conn, 0)
+
+    def test_init(self):
+        self.assertFalse(self.channel.no_ack_consumers)
+
+    def test_prepare_message(self):
+        self.assertTrue(self.channel.prepare_message(
+            'foobar', 10, 'application/data', 'utf-8',
+            properties={},
+        ))
+
+    def test_message_to_python(self):
+        message = Mock()
+        message.headers = {}
+        message.properties = {}
+        self.assertTrue(self.channel.message_to_python(message))
+
+    def test_close_resolves_connection_cycle(self):
+        self.assertIsNotNone(self.channel.connection)
+        self.channel.close()
+        self.assertIsNone(self.channel.connection)
+
+    def test_basic_consume_registers_ack_status(self):
+        self.channel.wait_returns = 
'my-consumer-tag' + self.channel.basic_consume('foo', no_ack=True) + self.assertIn('my-consumer-tag', self.channel.no_ack_consumers) + + self.channel.wait_returns = 'other-consumer-tag' + self.channel.basic_consume('bar', no_ack=False) + self.assertNotIn('other-consumer-tag', self.channel.no_ack_consumers) + + self.channel.basic_cancel('my-consumer-tag') + self.assertNotIn('my-consumer-tag', self.channel.no_ack_consumers) + + +class test_Transport(Case): + + def setUp(self): + if pyamqp is None: + raise SkipTest('py-amqp not installed') + self.connection = Connection('pyamqp://') + self.transport = self.connection.transport + + def test_create_channel(self): + connection = Mock() + self.transport.create_channel(connection) + connection.channel.assert_called_with() + + def test_driver_version(self): + self.assertTrue(self.transport.driver_version()) + + def test_drain_events(self): + connection = Mock() + self.transport.drain_events(connection, timeout=10.0) + connection.drain_events.assert_called_with(timeout=10.0) + + def test_dnspython_localhost_resolve_bug(self): + + class Conn(object): + + def __init__(self, **kwargs): + vars(self).update(kwargs) + + self.transport.Connection = Conn + self.transport.client.hostname = 'localhost' + conn1 = self.transport.establish_connection() + self.assertEqual(conn1.host, '127.0.0.1:5672') + + self.transport.client.hostname = 'example.com' + conn2 = self.transport.establish_connection() + self.assertEqual(conn2.host, 'example.com:5672') + + def test_close_connection(self): + connection = Mock() + connection.client = Mock() + self.transport.close_connection(connection) + + self.assertIsNone(connection.client) + connection.close.assert_called_with() + + @mask_modules('ssl') + def test_import_no_ssl(self): + pm = sys.modules.pop('amqp.connection') + try: + from amqp.connection import SSLError + self.assertEqual(SSLError.__module__, 'amqp.connection') + finally: + if pm is not None: + sys.modules['amqp.connection'] = pm + + +class test_pyamqp(Case): + + def setUp(self): + if pyamqp is None: + raise SkipTest('py-amqp not installed') + + def test_default_port(self): + + class Transport(pyamqp.Transport): + Connection = MockConnection + + c = Connection(port=None, transport=Transport).connect() + self.assertEqual(c['host'], + '127.0.0.1:%s' % (Transport.default_port, )) + + def test_custom_port(self): + + class Transport(pyamqp.Transport): + Connection = MockConnection + + c = Connection(port=1337, transport=Transport).connect() + self.assertEqual(c['host'], '127.0.0.1:1337') + + def test_register_with_event_loop(self): + t = pyamqp.Transport(Mock()) + conn = Mock(name='conn') + loop = Mock(name='loop') + t.register_with_event_loop(conn, loop) + loop.add_reader.assert_called_with( + conn.sock, t.on_readable, conn, loop, + ) + + def test_heartbeat_check(self): + t = pyamqp.Transport(Mock()) + conn = Mock() + t.heartbeat_check(conn, rate=4.331) + conn.heartbeat_tick.assert_called_with(rate=4.331) + + def test_get_manager(self): + with patch('kombu.transport.pyamqp.get_manager') as get_manager: + t = pyamqp.Transport(Mock()) + t.get_manager(1, kw=2) + get_manager.assert_called_with(t.client, 1, kw=2) diff --git a/kombu/tests/transport/test_redis.py b/kombu/tests/transport/test_redis.py new file mode 100644 index 0000000..17f2dcc --- /dev/null +++ b/kombu/tests/transport/test_redis.py @@ -0,0 +1,1237 @@ +from __future__ import absolute_import + +import socket +import types + +from anyjson import dumps, loads +from collections import defaultdict +from itertools 
import count + +from kombu import Connection, Exchange, Queue, Consumer, Producer +from kombu.exceptions import InconsistencyError, VersionMismatch +from kombu.five import Empty, Queue as _Queue +from kombu.transport import virtual +from kombu.utils import eventio # patch poll + +from kombu.tests.case import ( + Case, Mock, call, module_exists, skip_if_not_module, patch, +) + + +class _poll(eventio._select): + + def register(self, fd, flags): + if flags & eventio.READ: + self._rfd.add(fd) + + def poll(self, timeout): + events = [] + for fd in self._rfd: + if fd.data: + events.append((fd.fileno(), eventio.READ)) + return events + + +eventio.poll = _poll +from kombu.transport import redis # must import after poller patch + + +class ResponseError(Exception): + pass + + +class Client(object): + queues = {} + sets = defaultdict(set) + hashes = defaultdict(dict) + shard_hint = None + + def __init__(self, db=None, port=None, connection_pool=None, **kwargs): + self._called = [] + self._connection = None + self.bgsave_raises_ResponseError = False + self.connection = self._sconnection(self) + + def bgsave(self): + self._called.append('BGSAVE') + if self.bgsave_raises_ResponseError: + raise ResponseError() + + def delete(self, key): + self.queues.pop(key, None) + + def exists(self, key): + return key in self.queues or key in self.sets + + def hset(self, key, k, v): + self.hashes[key][k] = v + + def hget(self, key, k): + return self.hashes[key].get(k) + + def hdel(self, key, k): + self.hashes[key].pop(k, None) + + def sadd(self, key, member, *args): + self.sets[key].add(member) + zadd = sadd + + def smembers(self, key): + return self.sets.get(key, set()) + + def srem(self, key, *args): + self.sets.pop(key, None) + zrem = srem + + def llen(self, key): + try: + return self.queues[key].qsize() + except KeyError: + return 0 + + def lpush(self, key, value): + self.queues[key].put_nowait(value) + + def parse_response(self, connection, type, **options): + cmd, queues = self.connection._sock.data.pop() + assert cmd == type + self.connection._sock.data = [] + if type == 'BRPOP': + item = self.brpop(queues, 0.001) + if item: + return item + raise Empty() + + def brpop(self, keys, timeout=None): + key = keys[0] + try: + item = self.queues[key].get(timeout=timeout) + except Empty: + pass + else: + return key, item + + def rpop(self, key): + try: + return self.queues[key].get_nowait() + except KeyError: + pass + + def __contains__(self, k): + return k in self._called + + def pipeline(self): + return Pipeline(self) + + def encode(self, value): + return str(value) + + def _new_queue(self, key): + self.queues[key] = _Queue() + + class _sconnection(object): + disconnected = False + + class _socket(object): + blocking = True + filenos = count(30) + + def __init__(self, *args): + self._fileno = next(self.filenos) + self.data = [] + + def fileno(self): + return self._fileno + + def setblocking(self, blocking): + self.blocking = blocking + + def __init__(self, client): + self.client = client + self._sock = self._socket() + + def disconnect(self): + self.disconnected = True + + def send_command(self, cmd, *args): + self._sock.data.append((cmd, args)) + + def info(self): + return {'foo': 1} + + def pubsub(self, *args, **kwargs): + connection = self.connection + + class ConnectionPool(object): + + def get_connection(self, *args, **kwargs): + return connection + self.connection_pool = ConnectionPool() + + return self + + +class Pipeline(object): + + def __init__(self, client): + self.client = client + self.stack = [] + + def 
__getattr__(self, key): + if key not in self.__dict__: + + def _add(*args, **kwargs): + self.stack.append((getattr(self.client, key), args, kwargs)) + return self + + return _add + return self.__dict__[key] + + def execute(self): + stack = list(self.stack) + self.stack[:] = [] + return [fun(*args, **kwargs) for fun, args, kwargs in stack] + + +class Channel(redis.Channel): + + def _get_client(self): + return Client + + def _get_pool(self): + return Mock() + + def _get_response_error(self): + return ResponseError + + def _new_queue(self, queue, **kwargs): + self.client._new_queue(queue) + + def pipeline(self): + return Pipeline(Client()) + + +class Transport(redis.Transport): + Channel = Channel + + def _get_errors(self): + return ((KeyError, ), (IndexError, )) + + +class test_Channel(Case): + + @skip_if_not_module('redis') + def setUp(self): + self.connection = self.create_connection() + self.channel = self.connection.default_channel + + def create_connection(self, **kwargs): + kwargs.setdefault('transport_options', {'fanout_patterns': True}) + return Connection(transport=Transport, **kwargs) + + def _get_one_delivery_tag(self, n='test_uniq_tag'): + with self.create_connection() as conn1: + chan = conn1.default_channel + chan.exchange_declare(n) + chan.queue_declare(n) + chan.queue_bind(n, n, n) + msg = chan.prepare_message('quick brown fox') + chan.basic_publish(msg, n, n) + q, payload = chan.client.brpop([n]) + self.assertEqual(q, n) + self.assertTrue(payload) + pymsg = chan.message_to_python(loads(payload)) + return pymsg.delivery_tag + + def test_delivery_tag_is_uuid(self): + seen = set() + for i in range(100): + tag = self._get_one_delivery_tag() + self.assertNotIn(tag, seen) + seen.add(tag) + with self.assertRaises(ValueError): + int(tag) + self.assertEqual(len(tag), 36) + + def test_disable_ack_emulation(self): + conn = Connection(transport=Transport, transport_options={ + 'ack_emulation': False, + }) + + chan = conn.channel() + self.assertFalse(chan.ack_emulation) + self.assertEqual(chan.QoS, virtual.QoS) + + def test_redis_info_raises(self): + pool = Mock(name='pool') + pool_at_init = [pool] + client = Mock(name='client') + + class XChannel(Channel): + + def __init__(self, *args, **kwargs): + self._pool = pool_at_init[0] + super(XChannel, self).__init__(*args, **kwargs) + + def _get_client(self): + return lambda *_, **__: client + + class XTransport(Transport): + Channel = XChannel + + conn = Connection(transport=XTransport) + client.info.side_effect = RuntimeError() + with self.assertRaises(RuntimeError): + conn.channel() + pool.disconnect.assert_called_with() + pool.disconnect.reset_mock() + + pool_at_init = [None] + with self.assertRaises(RuntimeError): + conn.channel() + self.assertFalse(pool.disconnect.called) + + def test_after_fork(self): + self.channel._pool = None + self.channel._after_fork() + + self.channel._pool = Mock(name='pool') + self.channel._after_fork() + self.channel._pool.disconnect.assert_called_with() + + def test_next_delivery_tag(self): + self.assertNotEqual( + self.channel._next_delivery_tag(), + self.channel._next_delivery_tag(), + ) + + def test_do_restore_message(self): + client = Mock(name='client') + pl1 = {'body': 'BODY'} + spl1 = dumps(pl1) + lookup = self.channel._lookup = Mock(name='_lookup') + lookup.return_value = ['george', 'elaine'] + self.channel._do_restore_message( + pl1, 'ex', 'rkey', client, + ) + client.rpush.assert_has_calls([ + call('george', spl1), call('elaine', spl1), + ]) + + pl2 = {'body': 'BODY2', 'headers': {'x-funny': 1}} + 
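+        # restoring must mark the message as redelivered in its headers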
headers_after = dict(pl2['headers'], redelivered=True) + spl2 = dumps(dict(pl2, headers=headers_after)) + self.channel._do_restore_message( + pl2, 'ex', 'rkey', client, + ) + client.rpush.assert_has_calls([ + call('george', spl2), call('elaine', spl2), + ]) + + client.rpush.side_effect = KeyError() + with patch('kombu.transport.redis.crit') as crit: + self.channel._do_restore_message( + pl2, 'ex', 'rkey', client, + ) + self.assertTrue(crit.called) + + def test_restore(self): + message = Mock(name='message') + with patch('kombu.transport.redis.loads') as loads: + loads.return_value = 'M', 'EX', 'RK' + client = self.channel.client = Mock(name='client') + restore = self.channel._do_restore_message = Mock( + name='_do_restore_message', + ) + pipe = Mock(name='pipe') + client.pipeline.return_value = pipe + pipe_hget = Mock(name='pipe.hget') + pipe.hget.return_value = pipe_hget + pipe_hget_hdel = Mock(name='pipe.hget.hdel') + pipe_hget.hdel.return_value = pipe_hget_hdel + result = Mock(name='result') + pipe_hget_hdel.execute.return_value = None, None + + self.channel._restore(message) + client.pipeline.assert_called_with() + unacked_key = self.channel.unacked_key + self.assertFalse(loads.called) + + tag = message.delivery_tag + pipe.hget.assert_called_with(unacked_key, tag) + pipe_hget.hdel.assert_called_with(unacked_key, tag) + pipe_hget_hdel.execute.assert_called_with() + + pipe_hget_hdel.execute.return_value = result, None + self.channel._restore(message) + loads.assert_called_with(result) + restore.assert_called_with('M', 'EX', 'RK', client, False) + + def test_qos_restore_visible(self): + client = self.channel.client = Mock(name='client') + client.zrevrangebyscore.return_value = [ + (1, 10), + (2, 20), + (3, 30), + ] + qos = redis.QoS(self.channel) + restore = qos.restore_by_tag = Mock(name='restore_by_tag') + qos._vrestore_count = 1 + qos.restore_visible() + self.assertFalse(client.zrevrangebyscore.called) + self.assertEqual(qos._vrestore_count, 2) + + qos._vrestore_count = 0 + qos.restore_visible() + restore.assert_has_calls([ + call(1, client), call(2, client), call(3, client), + ]) + self.assertEqual(qos._vrestore_count, 1) + + qos._vrestore_count = 0 + restore.reset_mock() + client.zrevrangebyscore.return_value = [] + qos.restore_visible() + self.assertFalse(restore.called) + self.assertEqual(qos._vrestore_count, 1) + + qos._vrestore_count = 0 + client.setnx.side_effect = redis.MutexHeld() + qos.restore_visible() + + def test_basic_consume_when_fanout_queue(self): + self.channel.exchange_declare(exchange='txconfan', type='fanout') + self.channel.queue_declare(queue='txconfanq') + self.channel.queue_bind(queue='txconfanq', exchange='txconfan') + + self.assertIn('txconfanq', self.channel._fanout_queues) + self.channel.basic_consume('txconfanq', False, None, 1) + self.assertIn('txconfanq', self.channel.active_fanout_queues) + self.assertEqual(self.channel._fanout_to_queue.get('txconfan'), + 'txconfanq') + + def test_basic_cancel_unknown_delivery_tag(self): + self.assertIsNone(self.channel.basic_cancel('txaseqwewq')) + + def test_subscribe_no_queues(self): + self.channel.subclient = Mock() + self.channel.active_fanout_queues.clear() + self.channel._subscribe() + + self.assertFalse(self.channel.subclient.subscribe.called) + + def test_subscribe(self): + self.channel.subclient = Mock() + self.channel.active_fanout_queues.add('a') + self.channel.active_fanout_queues.add('b') + self.channel._fanout_queues.update(a=('a', ''), b=('b', '')) + + self.channel._subscribe() + 
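+        # with fanout_patterns enabled, fanout queues subscribe by pattern (psubscribe)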
self.assertTrue(self.channel.subclient.psubscribe.called) + s_args, _ = self.channel.subclient.psubscribe.call_args + self.assertItemsEqual(s_args[0], ['a', 'b']) + + self.channel.subclient.connection._sock = None + self.channel._subscribe() + self.channel.subclient.connection.connect.assert_called_with() + + def test_handle_unsubscribe_message(self): + s = self.channel.subclient + s.subscribed = True + self.channel._handle_message(s, ['unsubscribe', 'a', 0]) + self.assertFalse(s.subscribed) + + def test_handle_pmessage_message(self): + self.assertDictEqual( + self.channel._handle_message( + self.channel.subclient, + ['pmessage', 'pattern', 'channel', 'data'], + ), + { + 'type': 'pmessage', + 'pattern': 'pattern', + 'channel': 'channel', + 'data': 'data', + }, + ) + + def test_handle_message(self): + self.assertDictEqual( + self.channel._handle_message( + self.channel.subclient, + ['type', 'channel', 'data'], + ), + { + 'type': 'type', + 'pattern': None, + 'channel': 'channel', + 'data': 'data', + }, + ) + + def test_brpop_start_but_no_queues(self): + self.assertIsNone(self.channel._brpop_start()) + + def test_receive(self): + s = self.channel.subclient = Mock() + self.channel._fanout_to_queue['a'] = 'b' + s.parse_response.return_value = ['message', 'a', + dumps({'hello': 'world'})] + payload, queue = self.channel._receive() + self.assertDictEqual(payload, {'hello': 'world'}) + self.assertEqual(queue, 'b') + + def test_receive_raises(self): + self.channel._in_listen = True + s = self.channel.subclient = Mock() + s.parse_response.side_effect = KeyError('foo') + + with self.assertRaises(redis.Empty): + self.channel._receive() + self.assertFalse(self.channel._in_listen) + + def test_receive_empty(self): + s = self.channel.subclient = Mock() + s.parse_response.return_value = None + + with self.assertRaises(redis.Empty): + self.channel._receive() + + def test_receive_different_message_Type(self): + s = self.channel.subclient = Mock() + s.parse_response.return_value = ['message', '/foo/', 0, 'data'] + + with self.assertRaises(redis.Empty): + self.channel._receive() + + def test_brpop_read_raises(self): + c = self.channel.client = Mock() + c.parse_response.side_effect = KeyError('foo') + + with self.assertRaises(redis.Empty): + self.channel._brpop_read() + + c.connection.disconnect.assert_called_with() + + def test_brpop_read_gives_None(self): + c = self.channel.client = Mock() + c.parse_response.return_value = None + + with self.assertRaises(redis.Empty): + self.channel._brpop_read() + + def test_poll_error(self): + c = self.channel.client = Mock() + c.parse_response = Mock() + self.channel._poll_error('BRPOP') + + c.parse_response.assert_called_with(c.connection, 'BRPOP') + + c.parse_response.side_effect = KeyError('foo') + with self.assertRaises(KeyError): + self.channel._poll_error('BRPOP') + + def test_poll_error_on_type_LISTEN(self): + c = self.channel.subclient = Mock() + c.parse_response = Mock() + self.channel._poll_error('LISTEN') + + c.parse_response.assert_called_with() + + c.parse_response.side_effect = KeyError('foo') + with self.assertRaises(KeyError): + self.channel._poll_error('LISTEN') + + def test_put_fanout(self): + self.channel._in_poll = False + c = self.channel.client = Mock() + + body = {'hello': 'world'} + self.channel._put_fanout('exchange', body, '') + c.publish.assert_called_with('exchange', dumps(body)) + + def test_put_priority(self): + client = self.channel.client = Mock(name='client') + msg1 = {'properties': {'delivery_info': {'priority': 3}}} + + 
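+        # each priority is routed to its own list; out-of-range values fall
+        # into the highest (9) or default (0) bucket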
+        self.channel._put('george', msg1)
+        client.lpush.assert_called_with(
+            self.channel._q_for_pri('george', 3), dumps(msg1),
+        )
+
+        msg2 = {'properties': {'delivery_info': {'priority': 313}}}
+        self.channel._put('george', msg2)
+        client.lpush.assert_called_with(
+            self.channel._q_for_pri('george', 9), dumps(msg2),
+        )
+
+        msg3 = {'properties': {'delivery_info': {}}}
+        self.channel._put('george', msg3)
+        client.lpush.assert_called_with(
+            self.channel._q_for_pri('george', 0), dumps(msg3),
+        )
+
+    def test_delete(self):
+        x = self.channel
+        self.channel._in_poll = False
+        delete = x.client.delete = Mock()
+        srem = x.client.srem = Mock()
+
+        x._delete('queue', 'exchange', 'routing_key', None)
+        delete.assert_any_call('queue')
+        srem.assert_any_call(x.keyprefix_queue % ('exchange', ),
+                             x.sep.join(['routing_key', '', 'queue']))
+
+    def test_has_queue(self):
+        self.channel._in_poll = False
+        exists = self.channel.client.exists = Mock()
+        exists.return_value = True
+        self.assertTrue(self.channel._has_queue('foo'))
+        exists.assert_any_call('foo')
+
+        exists.return_value = False
+        self.assertFalse(self.channel._has_queue('foo'))
+
+    def test_close_when_closed(self):
+        self.channel.closed = True
+        self.channel.close()
+
+    def test_close_deletes_autodelete_fanout_queues(self):
+        self.channel._fanout_queues = {'foo': ('foo', ''), 'bar': ('bar', '')}
+        self.channel.auto_delete_queues = ['foo']
+        self.channel.queue_delete = Mock(name='queue_delete')
+
+        self.channel.close()
+        self.channel.queue_delete.assert_has_calls([call('foo')])
+
+    def test_close_client_close_raises(self):
+        c = self.channel.client = Mock()
+        c.connection.disconnect.side_effect = self.channel.ResponseError()
+
+        self.channel.close()
+        c.connection.disconnect.assert_called_with()
+
+    def test_invalid_database_raises_ValueError(self):
+
+        with self.assertRaises(ValueError):
+            self.channel.connection.client.virtual_host = 'dwqeq'
+            self.channel._connparams()
+
+    @skip_if_not_module('redis')
+    def test_connparams_allows_slash_in_db(self):
+        self.channel.connection.client.virtual_host = '/123'
+        self.assertEqual(self.channel._connparams()['db'], 123)
+
+    @skip_if_not_module('redis')
+    def test_connparams_db_can_be_int(self):
+        self.channel.connection.client.virtual_host = 124
+        self.assertEqual(self.channel._connparams()['db'], 124)
+
+    def test_new_queue_with_auto_delete(self):
+        redis.Channel._new_queue(self.channel, 'george', auto_delete=False)
+        self.assertNotIn('george', self.channel.auto_delete_queues)
+        redis.Channel._new_queue(self.channel, 'elaine', auto_delete=True)
+        self.assertIn('elaine', self.channel.auto_delete_queues)
+
+    @skip_if_not_module('redis')
+    def test_connparams_regular_hostname(self):
+        self.channel.connection.client.hostname = 'george.vandelay.com'
+        self.assertEqual(
+            self.channel._connparams()['host'],
+            'george.vandelay.com',
+        )
+
+    def test_rotate_cycle_ValueError(self):
+        cycle = self.channel._queue_cycle = ['kramer', 'jerry']
+        self.channel._rotate_cycle('kramer')
+        self.assertEqual(cycle, ['jerry', 'kramer'])
+        self.channel._rotate_cycle('elaine')
+
+    @skip_if_not_module('redis')
+    def test_get_client(self):
+        import redis as R
+        KombuRedis = redis.Channel._get_client(self.channel)
+        self.assertTrue(KombuRedis)
+
+        Rv = getattr(R, 'VERSION', None)
+        try:
+            R.VERSION = (2, 4, 0)
+            with self.assertRaises(VersionMismatch):
+                redis.Channel._get_client(self.channel)
+        finally:
+            if Rv is not None:
+                R.VERSION = Rv
+
+    @skip_if_not_module('redis')
+    def test_get_response_error(self):
+        from redis.exceptions 
import ResponseError + self.assertIs(redis.Channel._get_response_error(self.channel), + ResponseError) + + def test_avail_client_when_not_in_poll(self): + self.channel._in_poll = False + c = self.channel.client = Mock() + + with self.channel.conn_or_acquire() as client: + self.assertIs(client, c) + + def test_avail_client_when_in_poll(self): + self.channel._in_poll = True + self.channel._pool = Mock() + cc = self.channel._create_client = Mock() + client = cc.return_value = Mock() + + with self.channel.conn_or_acquire(): + pass + self.channel.pool.release.assert_called_with(client.connection) + cc.assert_called_with() + + def test_register_with_event_loop(self): + transport = self.connection.transport + transport.cycle = Mock(name='cycle') + transport.cycle.fds = {12: 'LISTEN', 13: 'BRPOP'} + conn = Mock(name='conn') + loop = Mock(name='loop') + redis.Transport.register_with_event_loop(transport, conn, loop) + transport.cycle.on_poll_init.assert_called_with(loop.poller) + loop.call_repeatedly.assert_called_with( + 10, transport.cycle.maybe_restore_messages, + ) + self.assertTrue(loop.on_tick.add.called) + on_poll_start = loop.on_tick.add.call_args[0][0] + + on_poll_start() + transport.cycle.on_poll_start.assert_called_with() + loop.add_reader.assert_has_calls([ + call(12, transport.on_readable, 12), + call(13, transport.on_readable, 13), + ]) + + def test_transport_on_readable(self): + transport = self.connection.transport + cycle = transport.cycle = Mock(name='cyle') + cycle.on_readable.return_value = None + + redis.Transport.on_readable(transport, 13) + cycle.on_readable.assert_called_with(13) + cycle.on_readable.reset_mock() + + queue = Mock(name='queue') + ret = (Mock(name='message'), queue) + cycle.on_readable.return_value = ret + with self.assertRaises(KeyError): + redis.Transport.on_readable(transport, 14) + + cb = transport._callbacks[queue] = Mock(name='callback') + redis.Transport.on_readable(transport, 14) + cb.assert_called_with(ret[0]) + + @skip_if_not_module('redis') + def test_transport_get_errors(self): + self.assertTrue(redis.Transport._get_errors(self.connection.transport)) + + @skip_if_not_module('redis') + def test_transport_driver_version(self): + self.assertTrue( + redis.Transport.driver_version(self.connection.transport), + ) + + @skip_if_not_module('redis') + def test_transport_get_errors_when_InvalidData_used(self): + from redis import exceptions + + class ID(Exception): + pass + + DataError = getattr(exceptions, 'DataError', None) + InvalidData = getattr(exceptions, 'InvalidData', None) + exceptions.InvalidData = ID + exceptions.DataError = None + try: + errors = redis.Transport._get_errors(self.connection.transport) + self.assertTrue(errors) + self.assertIn(ID, errors[1]) + finally: + if DataError is not None: + exceptions.DataError = DataError + if InvalidData is not None: + exceptions.InvalidData = InvalidData + + def test_empty_queues_key(self): + channel = self.channel + channel._in_poll = False + key = channel.keyprefix_queue % 'celery' + + # Everything is fine, there is a list of queues. + channel.client.sadd(key, 'celery\x06\x16\x06\x16celery') + self.assertListEqual(channel.get_table('celery'), + [('celery', '', 'celery')]) + + # ... then for some reason, the _kombu.binding.celery key gets lost + channel.client.srem(key) + + # which raises a channel error so that the consumer/publisher + # can recover by redeclaring the required entities. 
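+        # (here that surfaces as an InconsistencyError)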
+ with self.assertRaises(InconsistencyError): + self.channel.get_table('celery') + + @skip_if_not_module('redis') + def test_socket_connection(self): + with patch('kombu.transport.redis.Channel._create_client'): + with Connection('redis+socket:///tmp/redis.sock') as conn: + connparams = conn.default_channel._connparams() + self.assertTrue(issubclass( + connparams['connection_class'], + redis.redis.UnixDomainSocketConnection, + )) + self.assertEqual(connparams['path'], '/tmp/redis.sock') + + +class test_Redis(Case): + + @skip_if_not_module('redis') + def setUp(self): + self.connection = Connection(transport=Transport) + self.exchange = Exchange('test_Redis', type='direct') + self.queue = Queue('test_Redis', self.exchange, 'test_Redis') + + def tearDown(self): + self.connection.close() + + def test_publish__get(self): + channel = self.connection.channel() + producer = Producer(channel, self.exchange, routing_key='test_Redis') + self.queue(channel).declare() + + producer.publish({'hello': 'world'}) + + self.assertDictEqual(self.queue(channel).get().payload, + {'hello': 'world'}) + self.assertIsNone(self.queue(channel).get()) + self.assertIsNone(self.queue(channel).get()) + self.assertIsNone(self.queue(channel).get()) + + def test_publish__consume(self): + connection = Connection(transport=Transport) + channel = connection.channel() + producer = Producer(channel, self.exchange, routing_key='test_Redis') + consumer = Consumer(channel, queues=[self.queue]) + + producer.publish({'hello2': 'world2'}) + _received = [] + + def callback(message_data, message): + _received.append(message_data) + message.ack() + + consumer.register_callback(callback) + consumer.consume() + + self.assertIn(channel, channel.connection.cycle._channels) + try: + connection.drain_events(timeout=1) + self.assertTrue(_received) + with self.assertRaises(socket.timeout): + connection.drain_events(timeout=0.01) + finally: + channel.close() + + def test_purge(self): + channel = self.connection.channel() + producer = Producer(channel, self.exchange, routing_key='test_Redis') + self.queue(channel).declare() + + for i in range(10): + producer.publish({'hello': 'world-%s' % (i, )}) + + self.assertEqual(channel._size('test_Redis'), 10) + self.assertEqual(self.queue(channel).purge(), 10) + channel.close() + + def test_db_values(self): + Connection(virtual_host=1, + transport=Transport).channel() + + Connection(virtual_host='1', + transport=Transport).channel() + + Connection(virtual_host='/1', + transport=Transport).channel() + + with self.assertRaises(Exception): + Connection('redis:///foo').channel() + + def test_db_port(self): + c1 = Connection(port=None, transport=Transport).channel() + c1.close() + + c2 = Connection(port=9999, transport=Transport).channel() + c2.close() + + def test_close_poller_not_active(self): + c = Connection(transport=Transport).channel() + cycle = c.connection.cycle + c.client.connection + c.close() + self.assertNotIn(c, cycle._channels) + + def test_close_ResponseError(self): + c = Connection(transport=Transport).channel() + c.client.bgsave_raises_ResponseError = True + c.close() + + def test_close_disconnects(self): + c = Connection(transport=Transport).channel() + conn1 = c.client.connection + conn2 = c.subclient.connection + c.close() + self.assertTrue(conn1.disconnected) + self.assertTrue(conn2.disconnected) + + def test_get__Empty(self): + channel = self.connection.channel() + with self.assertRaises(Empty): + channel._get('does-not-exist') + channel.close() + + def test_get_client(self): + + myredis, 
exceptions = _redis_modules() + + @module_exists(myredis, exceptions) + def _do_test(): + conn = Connection(transport=Transport) + chan = conn.channel() + self.assertTrue(chan.Client) + self.assertTrue(chan.ResponseError) + self.assertTrue(conn.transport.connection_errors) + self.assertTrue(conn.transport.channel_errors) + + _do_test() + + +def _redis_modules(): + + class ConnectionError(Exception): + pass + + class AuthenticationError(Exception): + pass + + class InvalidData(Exception): + pass + + class InvalidResponse(Exception): + pass + + class ResponseError(Exception): + pass + + exceptions = types.ModuleType('redis.exceptions') + exceptions.ConnectionError = ConnectionError + exceptions.AuthenticationError = AuthenticationError + exceptions.InvalidData = InvalidData + exceptions.InvalidResponse = InvalidResponse + exceptions.ResponseError = ResponseError + + class Redis(object): + pass + + myredis = types.ModuleType('redis') + myredis.exceptions = exceptions + myredis.Redis = Redis + + return myredis, exceptions + + +class test_MultiChannelPoller(Case): + + @skip_if_not_module('redis') + def setUp(self): + self.Poller = redis.MultiChannelPoller + + def test_on_poll_start(self): + p = self.Poller() + p._channels = [] + p.on_poll_start() + p._register_BRPOP = Mock(name='_register_BRPOP') + p._register_LISTEN = Mock(name='_register_LISTEN') + + chan1 = Mock(name='chan1') + p._channels = [chan1] + chan1.active_queues = [] + chan1.active_fanout_queues = [] + p.on_poll_start() + + chan1.active_queues = ['q1'] + chan1.active_fanout_queues = ['q2'] + chan1.qos.can_consume.return_value = False + + p.on_poll_start() + p._register_LISTEN.assert_called_with(chan1) + self.assertFalse(p._register_BRPOP.called) + + chan1.qos.can_consume.return_value = True + p._register_LISTEN.reset_mock() + p.on_poll_start() + + p._register_BRPOP.assert_called_with(chan1) + p._register_LISTEN.assert_called_with(chan1) + + def test_on_poll_init(self): + p = self.Poller() + chan1 = Mock(name='chan1') + p._channels = [] + poller = Mock(name='poller') + p.on_poll_init(poller) + self.assertIs(p.poller, poller) + + p._channels = [chan1] + p.on_poll_init(poller) + chan1.qos.restore_visible.assert_called_with( + num=chan1.unacked_restore_limit, + ) + + def test_handle_event(self): + p = self.Poller() + chan = Mock(name='chan') + p._fd_to_chan[13] = chan, 'BRPOP' + chan.handlers = {'BRPOP': Mock(name='BRPOP')} + + chan.qos.can_consume.return_value = False + p.handle_event(13, redis.READ) + self.assertFalse(chan.handlers['BRPOP'].called) + + chan.qos.can_consume.return_value = True + p.handle_event(13, redis.READ) + chan.handlers['BRPOP'].assert_called_with() + + p.handle_event(13, redis.ERR) + chan._poll_error.assert_called_with('BRPOP') + + p.handle_event(13, ~(redis.READ | redis.ERR)) + + def test_fds(self): + p = self.Poller() + p._fd_to_chan = {1: 2} + self.assertDictEqual(p.fds, p._fd_to_chan) + + def test_close_unregisters_fds(self): + p = self.Poller() + poller = p.poller = Mock() + p._chan_to_sock.update({1: 1, 2: 2, 3: 3}) + + p.close() + + self.assertEqual(poller.unregister.call_count, 3) + u_args = poller.unregister.call_args_list + + self.assertItemsEqual(u_args, [((1, ), {}), + ((2, ), {}), + ((3, ), {})]) + + def test_close_when_unregister_raises_KeyError(self): + p = self.Poller() + p.poller = Mock() + p._chan_to_sock.update({1: 1}) + p.poller.unregister.side_effect = KeyError(1) + p.close() + + def test_close_resets_state(self): + p = self.Poller() + p.poller = Mock() + p._channels = Mock() + p._fd_to_chan 
= Mock() + p._chan_to_sock = Mock() + + p._chan_to_sock.itervalues.return_value = [] + p._chan_to_sock.values.return_value = [] # py3k + + p.close() + p._channels.clear.assert_called_with() + p._fd_to_chan.clear.assert_called_with() + p._chan_to_sock.clear.assert_called_with() + + def test_register_when_registered_reregisters(self): + p = self.Poller() + p.poller = Mock() + channel, client, type = Mock(), Mock(), Mock() + sock = client.connection._sock = Mock() + sock.fileno.return_value = 10 + + p._chan_to_sock = {(channel, client, type): 6} + p._register(channel, client, type) + p.poller.unregister.assert_called_with(6) + self.assertTupleEqual(p._fd_to_chan[10], (channel, type)) + self.assertEqual(p._chan_to_sock[(channel, client, type)], sock) + p.poller.register.assert_called_with(sock, p.eventflags) + + # when client not connected yet + client.connection._sock = None + + def after_connected(): + client.connection._sock = Mock() + client.connection.connect.side_effect = after_connected + + p._register(channel, client, type) + client.connection.connect.assert_called_with() + + def test_register_BRPOP(self): + p = self.Poller() + channel = Mock() + channel.client.connection._sock = None + p._register = Mock() + + channel._in_poll = False + p._register_BRPOP(channel) + self.assertEqual(channel._brpop_start.call_count, 1) + self.assertEqual(p._register.call_count, 1) + + channel.client.connection._sock = Mock() + p._chan_to_sock[(channel, channel.client, 'BRPOP')] = True + channel._in_poll = True + p._register_BRPOP(channel) + self.assertEqual(channel._brpop_start.call_count, 1) + self.assertEqual(p._register.call_count, 1) + + def test_register_LISTEN(self): + p = self.Poller() + channel = Mock() + channel.subclient.connection._sock = None + channel._in_listen = False + p._register = Mock() + + p._register_LISTEN(channel) + p._register.assert_called_with(channel, channel.subclient, 'LISTEN') + self.assertEqual(p._register.call_count, 1) + self.assertEqual(channel._subscribe.call_count, 1) + + channel._in_listen = True + channel.subclient.connection._sock = Mock() + p._register_LISTEN(channel) + self.assertEqual(p._register.call_count, 1) + self.assertEqual(channel._subscribe.call_count, 1) + + def create_get(self, events=None, queues=None, fanouts=None): + _pr = [] if events is None else events + _aq = [] if queues is None else queues + _af = [] if fanouts is None else fanouts + p = self.Poller() + p.poller = Mock() + p.poller.poll.return_value = _pr + + p._register_BRPOP = Mock() + p._register_LISTEN = Mock() + + channel = Mock() + p._channels = [channel] + channel.active_queues = _aq + channel.active_fanout_queues = _af + + return p, channel + + def test_get_no_actions(self): + p, channel = self.create_get() + + with self.assertRaises(redis.Empty): + p.get() + + def test_qos_reject(self): + p, channel = self.create_get() + qos = redis.QoS(channel) + qos.ack = Mock(name='Qos.ack') + qos.reject(1234) + qos.ack.assert_called_with(1234) + + def test_get_brpop_qos_allow(self): + p, channel = self.create_get(queues=['a_queue']) + channel.qos.can_consume.return_value = True + + with self.assertRaises(redis.Empty): + p.get() + + p._register_BRPOP.assert_called_with(channel) + + def test_get_brpop_qos_disallow(self): + p, channel = self.create_get(queues=['a_queue']) + channel.qos.can_consume.return_value = False + + with self.assertRaises(redis.Empty): + p.get() + + self.assertFalse(p._register_BRPOP.called) + + def test_get_listen(self): + p, channel = self.create_get(fanouts=['f_queue']) + + 
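+        # only fanout queues are active, so get() registers LISTEN and raises Empty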
with self.assertRaises(redis.Empty): + p.get() + + p._register_LISTEN.assert_called_with(channel) + + def test_get_receives_ERR(self): + p, channel = self.create_get(events=[(1, eventio.ERR)]) + p._fd_to_chan[1] = (channel, 'BRPOP') + + with self.assertRaises(redis.Empty): + p.get() + + channel._poll_error.assert_called_with('BRPOP') + + def test_get_receives_multiple(self): + p, channel = self.create_get(events=[(1, eventio.ERR), + (1, eventio.ERR)]) + p._fd_to_chan[1] = (channel, 'BRPOP') + + with self.assertRaises(redis.Empty): + p.get() + + channel._poll_error.assert_called_with('BRPOP') + + +class test_Mutex(Case): + + @skip_if_not_module('redis') + def test_mutex(self, lock_id='xxx'): + client = Mock(name='client') + with patch('kombu.transport.redis.uuid') as uuid: + # Won + uuid.return_value = lock_id + client.setnx.return_value = True + pipe = client.pipeline.return_value = Mock(name='pipe') + pipe.get.return_value = lock_id + held = False + with redis.Mutex(client, 'foo1', 100): + held = True + self.assertTrue(held) + client.setnx.assert_called_with('foo1', lock_id) + pipe.get.return_value = 'yyy' + held = False + with redis.Mutex(client, 'foo1', 100): + held = True + self.assertTrue(held) + + # Did not win + client.expire.reset_mock() + pipe.get.return_value = lock_id + client.setnx.return_value = False + with self.assertRaises(redis.MutexHeld): + held = False + with redis.Mutex(client, 'foo1', '100'): + held = True + self.assertFalse(held) + client.ttl.return_value = 0 + with self.assertRaises(redis.MutexHeld): + held = False + with redis.Mutex(client, 'foo1', '100'): + held = True + self.assertFalse(held) + self.assertTrue(client.expire.called) + + # Wins but raises WatchError (and that is ignored) + client.setnx.return_value = True + pipe.watch.side_effect = redis.redis.WatchError() + held = False + with redis.Mutex(client, 'foo1', 100): + held = True + self.assertTrue(held) diff --git a/kombu/tests/transport/test_sqlalchemy.py b/kombu/tests/transport/test_sqlalchemy.py new file mode 100644 index 0000000..0705599 --- /dev/null +++ b/kombu/tests/transport/test_sqlalchemy.py @@ -0,0 +1,69 @@ +from __future__ import absolute_import + +from kombu import Connection +from kombu.tests.case import Case, SkipTest, patch + + +class test_sqlalchemy(Case): + + def setUp(self): + try: + import sqlalchemy # noqa + except ImportError: + raise SkipTest('sqlalchemy not installed') + + def test_url_parser(self): + with patch('kombu.transport.sqlalchemy.Channel._open'): + url = 'sqlalchemy+sqlite:///celerydb.sqlite' + Connection(url).connect() + + url = 'sqla+sqlite:///celerydb.sqlite' + Connection(url).connect() + + # Should prevent regression fixed by f187ccd + url = 'sqlb+sqlite:///celerydb.sqlite' + with self.assertRaises(KeyError): + Connection(url).connect() + + def test_simple_queueing(self): + conn = Connection('sqlalchemy+sqlite:///:memory:') + conn.connect() + channel = conn.channel() + self.assertEqual( + channel.queue_cls.__table__.name, + 'kombu_queue' + ) + self.assertEqual( + channel.message_cls.__table__.name, + 'kombu_message' + ) + channel._put('celery', 'DATA') + assert channel._get('celery') == 'DATA' + + def test_custom_table_names(self): + raise SkipTest('causes global side effect') + conn = Connection('sqlalchemy+sqlite:///:memory:', transport_options={ + 'queue_tablename': 'my_custom_queue', + 'message_tablename': 'my_custom_message' + }) + conn.connect() + channel = conn.channel() + self.assertEqual( + channel.queue_cls.__table__.name, + 'my_custom_queue' + ) + 
self.assertEqual( + channel.message_cls.__table__.name, + 'my_custom_message' + ) + channel._put('celery', 'DATA') + assert channel._get('celery') == 'DATA' + + def test_clone(self): + hostname = 'sqlite:///celerydb.sqlite' + x = Connection('+'.join(['sqla', hostname])) + self.assertEqual(x.uri_prefix, 'sqla') + self.assertEqual(x.hostname, hostname) + clone = x.clone() + self.assertEqual(clone.hostname, hostname) + self.assertEqual(clone.uri_prefix, 'sqla') diff --git a/kombu/tests/transport/test_transport.py b/kombu/tests/transport/test_transport.py new file mode 100644 index 0000000..e10943b --- /dev/null +++ b/kombu/tests/transport/test_transport.py @@ -0,0 +1,44 @@ +from __future__ import absolute_import + +from kombu import transport + +from kombu.tests.case import Case, Mock, patch + + +class test_supports_librabbitmq(Case): + + def test_eventlet(self): + with patch('kombu.transport._detect_environment') as de: + de.return_value = 'eventlet' + self.assertFalse(transport.supports_librabbitmq()) + + +class test_transport(Case): + + def test_resolve_transport(self): + from kombu.transport.memory import Transport + self.assertIs(transport.resolve_transport( + 'kombu.transport.memory:Transport'), + Transport) + self.assertIs(transport.resolve_transport(Transport), Transport) + + def test_resolve_transport_alias_callable(self): + m = transport.TRANSPORT_ALIASES['George'] = Mock(name='lazyalias') + try: + transport.resolve_transport('George') + m.assert_called_with() + finally: + transport.TRANSPORT_ALIASES.pop('George') + + def test_resolve_transport_alias(self): + self.assertTrue(transport.resolve_transport('pyamqp')) + + +class test_transport_ghettoq(Case): + + @patch('warnings.warn') + def test_compat(self, warn): + x = transport._ghettoq('Redis', 'redis', 'redis') + + self.assertEqual(x(), 'kombu.transport.redis.Transport') + self.assertTrue(warn.called) diff --git a/kombu/tests/transport/virtual/__init__.py b/kombu/tests/transport/virtual/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kombu/tests/transport/virtual/test_base.py b/kombu/tests/transport/virtual/test_base.py new file mode 100644 index 0000000..f872356 --- /dev/null +++ b/kombu/tests/transport/virtual/test_base.py @@ -0,0 +1,540 @@ +from __future__ import absolute_import + +import warnings + +from kombu import Connection +from kombu.exceptions import ResourceError, ChannelError +from kombu.transport import virtual +from kombu.utils import uuid +from kombu.compression import compress + +from kombu.tests.case import Case, Mock, patch, redirect_stdouts + + +def client(**kwargs): + return Connection(transport='kombu.transport.virtual:Transport', **kwargs) + + +def memory_client(): + return Connection(transport='memory') + + +class test_BrokerState(Case): + + def test_constructor(self): + s = virtual.BrokerState() + self.assertTrue(hasattr(s, 'exchanges')) + self.assertTrue(hasattr(s, 'bindings')) + + t = virtual.BrokerState(exchanges=16, bindings=32) + self.assertEqual(t.exchanges, 16) + self.assertEqual(t.bindings, 32) + + +class test_QoS(Case): + + def setUp(self): + self.q = virtual.QoS(client().channel(), prefetch_count=10) + + def tearDown(self): + self.q._on_collect.cancel() + + def test_constructor(self): + self.assertTrue(self.q.channel) + self.assertTrue(self.q.prefetch_count) + self.assertFalse(self.q._delivered.restored) + self.assertTrue(self.q._on_collect) + + @redirect_stdouts + def test_can_consume(self, stdout, stderr): + _restored = [] + + class RestoreChannel(virtual.Channel): + 
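+            # collects restored messages instead of requeueing them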
+            do_restore = True
+
+            def _restore(self, message):
+                _restored.append(message)
+
+        self.assertTrue(self.q.can_consume())
+        for i in range(self.q.prefetch_count - 1):
+            self.q.append(i, uuid())
+            self.assertTrue(self.q.can_consume())
+        self.q.append(i + 1, uuid())
+        self.assertFalse(self.q.can_consume())
+
+        tag1 = next(iter(self.q._delivered))
+        self.q.ack(tag1)
+        self.assertTrue(self.q.can_consume())
+
+        tag2 = uuid()
+        self.q.append(i + 2, tag2)
+        self.assertFalse(self.q.can_consume())
+        self.q.reject(tag2)
+        self.assertTrue(self.q.can_consume())
+
+        self.q.channel = RestoreChannel(self.q.channel.connection)
+        tag3 = uuid()
+        self.q.append(i + 3, tag3)
+        self.q.reject(tag3, requeue=True)
+        self.q._flush()
+        self.q.restore_unacked_once()
+        self.assertListEqual(_restored, [11, 9, 8, 7, 6, 5, 4, 3, 2, 1])
+        self.assertTrue(self.q._delivered.restored)
+        self.assertFalse(self.q._delivered)
+
+        self.q.restore_unacked_once()
+        self.q._delivered.restored = False
+        self.q.restore_unacked_once()
+
+        self.assertTrue(stderr.getvalue())
+        self.assertFalse(stdout.getvalue())
+
+        self.q.restore_at_shutdown = False
+        self.q.restore_unacked_once()
+
+    def test_get(self):
+        self.q._delivered['foo'] = 1
+        self.assertEqual(self.q.get('foo'), 1)
+
+
+class test_Message(Case):
+
+    def test_create(self):
+        c = client().channel()
+        data = c.prepare_message('the quick brown fox...')
+        tag = data['properties']['delivery_tag'] = uuid()
+        message = c.message_to_python(data)
+        self.assertIsInstance(message, virtual.Message)
+        self.assertIs(message, c.message_to_python(message))
+        if message.errors:
+            message._reraise_error()
+
+        self.assertEqual(message.body,
+                         'the quick brown fox...'.encode('utf-8'))
+        self.assertEqual(message.delivery_tag, tag)
+
+    def test_create_no_body(self):
+        virtual.Message(Mock(), {
+            'body': None,
+            'properties': {'delivery_tag': 1}})
+
+    def test_serializable(self):
+        c = client().channel()
+        body, content_type = compress('the quick brown fox...', 'gzip')
+        data = c.prepare_message(body, headers={'compression': content_type})
+        tag = data['properties']['delivery_tag'] = uuid()
+        message = c.message_to_python(data)
+        dict_ = message.serializable()
+        self.assertEqual(dict_['body'],
+                         'the quick brown fox...'.encode('utf-8'))
+        self.assertEqual(dict_['properties']['delivery_tag'], tag)
+        self.assertFalse('compression' in dict_['headers'])
+
+
+class test_AbstractChannel(Case):
+
+    def test_get(self):
+        with self.assertRaises(NotImplementedError):
+            virtual.AbstractChannel()._get('queue')
+
+    def test_put(self):
+        with self.assertRaises(NotImplementedError):
+            virtual.AbstractChannel()._put('queue', 'm')
+
+    def test_size(self):
+        self.assertEqual(virtual.AbstractChannel()._size('queue'), 0)
+
+    def test_purge(self):
+        with self.assertRaises(NotImplementedError):
+            virtual.AbstractChannel()._purge('queue')
+
+    def test_delete(self):
+        with self.assertRaises(NotImplementedError):
+            virtual.AbstractChannel()._delete('queue')
+
+    def test_new_queue(self):
+        self.assertIsNone(virtual.AbstractChannel()._new_queue('queue'))
+
+    def test_has_queue(self):
+        self.assertTrue(virtual.AbstractChannel()._has_queue('queue'))
+
+    def test_poll(self):
+
+        class Cycle(object):
+            called = False
+
+            def get(self):
+                self.called = True
+                return True
+
+        cycle = Cycle()
+        self.assertTrue(virtual.AbstractChannel()._poll(cycle))
+        self.assertTrue(cycle.called)
+
+
+class test_Channel(Case):
+
+    def setUp(self):
+        self.channel = client().channel()
+
+    def tearDown(self):
+        if self.channel._qos is not None:
self.channel._qos._on_collect.cancel() + + def test_exceeds_channel_max(self): + c = client() + t = c.transport + avail = t._avail_channel_ids = Mock(name='_avail_channel_ids') + avail.pop.side_effect = IndexError() + with self.assertRaises(ResourceError): + virtual.Channel(t) + + def test_exchange_bind_interface(self): + with self.assertRaises(NotImplementedError): + self.channel.exchange_bind('dest', 'src', 'key') + + def test_exchange_unbind_interface(self): + with self.assertRaises(NotImplementedError): + self.channel.exchange_unbind('dest', 'src', 'key') + + def test_queue_unbind_interface(self): + with self.assertRaises(NotImplementedError): + self.channel.queue_unbind('dest', 'ex', 'key') + + def test_management(self): + m = self.channel.connection.client.get_manager() + self.assertTrue(m) + m.get_bindings() + m.close() + + def test_exchange_declare(self): + c = self.channel + + with self.assertRaises(ChannelError): + c.exchange_declare('test_exchange_declare', 'direct', + durable=True, auto_delete=True, passive=True) + c.exchange_declare('test_exchange_declare', 'direct', + durable=True, auto_delete=True) + c.exchange_declare('test_exchange_declare', 'direct', + durable=True, auto_delete=True, passive=True) + self.assertIn('test_exchange_declare', c.state.exchanges) + # can declare again with same values + c.exchange_declare('test_exchange_declare', 'direct', + durable=True, auto_delete=True) + self.assertIn('test_exchange_declare', c.state.exchanges) + + # using different values raises NotEquivalentError + with self.assertRaises(virtual.NotEquivalentError): + c.exchange_declare('test_exchange_declare', 'direct', + durable=False, auto_delete=True) + + def test_exchange_delete(self, ex='test_exchange_delete'): + + class PurgeChannel(virtual.Channel): + purged = [] + + def _purge(self, queue): + self.purged.append(queue) + + c = PurgeChannel(self.channel.connection) + + c.exchange_declare(ex, 'direct', durable=True, auto_delete=True) + self.assertIn(ex, c.state.exchanges) + self.assertNotIn(ex, c.state.bindings) # no bindings yet + c.exchange_delete(ex) + self.assertNotIn(ex, c.state.exchanges) + + c.exchange_declare(ex, 'direct', durable=True, auto_delete=True) + c.queue_declare(ex) + c.queue_bind(ex, ex, ex) + self.assertTrue(c.state.bindings[ex]) + c.exchange_delete(ex) + self.assertNotIn(ex, c.state.bindings) + self.assertIn(ex, c.purged) + + def test_queue_delete__if_empty(self, n='test_queue_delete__if_empty'): + class PurgeChannel(virtual.Channel): + purged = [] + size = 30 + + def _purge(self, queue): + self.purged.append(queue) + + def _size(self, queue): + return self.size + + c = PurgeChannel(self.channel.connection) + c.exchange_declare(n) + c.queue_declare(n) + c.queue_bind(n, n, n) + # tests code path that returns if queue already bound. 
+ c.queue_bind(n, n, n) + + c.queue_delete(n, if_empty=True) + self.assertIn(n, c.state.bindings) + + c.size = 0 + c.queue_delete(n, if_empty=True) + self.assertNotIn(n, c.state.bindings) + self.assertIn(n, c.purged) + + def test_queue_purge(self, n='test_queue_purge'): + + class PurgeChannel(virtual.Channel): + purged = [] + + def _purge(self, queue): + self.purged.append(queue) + + c = PurgeChannel(self.channel.connection) + c.exchange_declare(n) + c.queue_declare(n) + c.queue_bind(n, n, n) + c.queue_purge(n) + self.assertIn(n, c.purged) + + def test_basic_publish_unique_delivery_tags(self, n='test_uniq_tag'): + c1 = memory_client().channel() + c2 = memory_client().channel() + + for c in (c1, c2): + c.exchange_declare(n) + c.queue_declare(n) + c.queue_bind(n, n, n) + m1 = c1.prepare_message('George Costanza') + m2 = c2.prepare_message('Elaine Marie Benes') + c1.basic_publish(m1, n, n) + c2.basic_publish(m2, n, n) + + r1 = c1.message_to_python(c1.basic_get(n)) + r2 = c2.message_to_python(c2.basic_get(n)) + + self.assertNotEqual(r1.delivery_tag, r2.delivery_tag) + with self.assertRaises(ValueError): + int(r1.delivery_tag) + with self.assertRaises(ValueError): + int(r2.delivery_tag) + + def test_basic_publish__get__consume__restore(self, + n='test_basic_publish'): + c = memory_client().channel() + + c.exchange_declare(n) + c.queue_declare(n) + c.queue_bind(n, n, n) + c.queue_declare(n + '2') + c.queue_bind(n + '2', n, n) + + m = c.prepare_message('nthex quick brown fox...') + c.basic_publish(m, n, n) + + r1 = c.message_to_python(c.basic_get(n)) + self.assertTrue(r1) + self.assertEqual(r1.body, + 'nthex quick brown fox...'.encode('utf-8')) + self.assertIsNone(c.basic_get(n)) + + consumer_tag = uuid() + + c.basic_consume(n + '2', False, + consumer_tag=consumer_tag, callback=lambda *a: None) + self.assertIn(n + '2', c._active_queues) + r2, _ = c.drain_events() + r2 = c.message_to_python(r2) + self.assertEqual(r2.body, + 'nthex quick brown fox...'.encode('utf-8')) + self.assertEqual(r2.delivery_info['exchange'], n) + self.assertEqual(r2.delivery_info['routing_key'], n) + with self.assertRaises(virtual.Empty): + c.drain_events() + c.basic_cancel(consumer_tag) + + c._restore(r2) + r3 = c.message_to_python(c.basic_get(n)) + self.assertTrue(r3) + self.assertEqual(r3.body, 'nthex quick brown fox...'.encode('utf-8')) + self.assertIsNone(c.basic_get(n)) + + def test_basic_ack(self): + + class MockQoS(virtual.QoS): + was_acked = False + + def ack(self, delivery_tag): + self.was_acked = True + + self.channel._qos = MockQoS(self.channel) + self.channel.basic_ack('foo') + self.assertTrue(self.channel._qos.was_acked) + + def test_basic_recover__requeue(self): + + class MockQoS(virtual.QoS): + was_restored = False + + def restore_unacked(self): + self.was_restored = True + + self.channel._qos = MockQoS(self.channel) + self.channel.basic_recover(requeue=True) + self.assertTrue(self.channel._qos.was_restored) + + def test_restore_unacked_raises_BaseException(self): + q = self.channel.qos + q._flush = Mock() + q._delivered = {1: 1} + + q.channel._restore = Mock() + q.channel._restore.side_effect = SystemExit + + errors = q.restore_unacked() + self.assertIsInstance(errors[0][0], SystemExit) + self.assertEqual(errors[0][1], 1) + self.assertFalse(q._delivered) + + @patch('kombu.transport.virtual.emergency_dump_state') + @patch('kombu.transport.virtual.say') + def test_restore_unacked_once_when_unrestored(self, say, + emergency_dump_state): + q = self.channel.qos + q._flush = Mock() + + class State(dict): + 
restored = False + + q._delivered = State({1: 1}) + ru = q.restore_unacked = Mock() + exc = None + try: + raise KeyError() + except KeyError as exc_: + exc = exc_ + ru.return_value = [(exc, 1)] + + self.channel.do_restore = True + q.restore_unacked_once() + self.assertTrue(say.called) + self.assertTrue(emergency_dump_state.called) + + def test_basic_recover(self): + with self.assertRaises(NotImplementedError): + self.channel.basic_recover(requeue=False) + + def test_basic_reject(self): + + class MockQoS(virtual.QoS): + was_rejected = False + + def reject(self, delivery_tag, requeue=False): + self.was_rejected = True + + self.channel._qos = MockQoS(self.channel) + self.channel.basic_reject('foo') + self.assertTrue(self.channel._qos.was_rejected) + + def test_basic_qos(self): + self.channel.basic_qos(prefetch_count=128) + self.assertEqual(self.channel._qos.prefetch_count, 128) + + def test_lookup__undeliverable(self, n='test_lookup__undeliverable'): + warnings.resetwarnings() + with warnings.catch_warnings(record=True) as log: + self.assertListEqual( + self.channel._lookup(n, n, 'ae.undeliver'), + ['ae.undeliver'], + ) + self.assertTrue(log) + self.assertIn('could not be delivered', log[0].message.args[0]) + + def test_context(self): + x = self.channel.__enter__() + self.assertIs(x, self.channel) + x.__exit__() + self.assertTrue(x.closed) + + def test_cycle_property(self): + self.assertTrue(self.channel.cycle) + + def test_flow(self): + with self.assertRaises(NotImplementedError): + self.channel.flow(False) + + def test_close_when_no_connection(self): + self.channel.connection = None + self.channel.close() + self.assertTrue(self.channel.closed) + + def test_drain_events_has_get_many(self): + c = self.channel + c._get_many = Mock() + c._poll = Mock() + c._consumers = [1] + c._qos = Mock() + c._qos.can_consume.return_value = True + + c.drain_events(timeout=10.0) + c._get_many.assert_called_with(c._active_queues, timeout=10.0) + + def test_get_exchanges(self): + self.channel.exchange_declare(exchange='foo') + self.assertTrue(self.channel.get_exchanges()) + + def test_basic_cancel_not_in_active_queues(self): + c = self.channel + c._consumers.add('x') + c._tag_to_queue['x'] = 'foo' + c._active_queues = Mock() + c._active_queues.remove.side_effect = ValueError() + + c.basic_cancel('x') + c._active_queues.remove.assert_called_with('foo') + + def test_basic_cancel_unknown_ctag(self): + self.assertIsNone(self.channel.basic_cancel('unknown-tag')) + + def test_list_bindings(self): + c = self.channel + c.exchange_declare(exchange='foo') + c.queue_declare(queue='q') + c.queue_bind(queue='q', exchange='foo', routing_key='rk') + + self.assertIn(('q', 'foo', 'rk'), list(c.list_bindings())) + + def test_after_reply_message_received(self): + c = self.channel + c.queue_delete = Mock() + c.after_reply_message_received('foo') + c.queue_delete.assert_called_with('foo') + + def test_queue_delete_unknown_queue(self): + self.assertIsNone(self.channel.queue_delete('xiwjqjwel')) + + def test_queue_declare_passive(self): + has_queue = self.channel._has_queue = Mock() + has_queue.return_value = False + with self.assertRaises(ChannelError): + self.channel.queue_declare(queue='21wisdjwqe', passive=True) + + +class test_Transport(Case): + + def setUp(self): + self.transport = client().transport + + def test_custom_polling_interval(self): + x = client(transport_options=dict(polling_interval=32.3)) + self.assertEqual(x.transport.polling_interval, 32.3) + + def test_close_connection(self): + c1 = 
self.transport.create_channel(self.transport) + c2 = self.transport.create_channel(self.transport) + self.assertEqual(len(self.transport.channels), 2) + self.transport.close_connection(self.transport) + self.assertFalse(self.transport.channels) + del(c1) # so pyflakes doesn't complain + del(c2) + + def test_drain_channel(self): + channel = self.transport.create_channel(self.transport) + with self.assertRaises(virtual.Empty): + self.transport._drain_channel(channel) diff --git a/kombu/tests/transport/virtual/test_exchange.py b/kombu/tests/transport/virtual/test_exchange.py new file mode 100644 index 0000000..ad590af --- /dev/null +++ b/kombu/tests/transport/virtual/test_exchange.py @@ -0,0 +1,161 @@ +from __future__ import absolute_import + +from kombu import Connection +from kombu.transport.virtual import exchange + +from kombu.tests.case import Case, Mock +from kombu.tests.mocks import Transport + + +class ExchangeCase(Case): + type = None + + def setUp(self): + if self.type: + self.e = self.type(Connection(transport=Transport).channel()) + + +class test_Direct(ExchangeCase): + type = exchange.DirectExchange + table = [('rFoo', None, 'qFoo'), + ('rFoo', None, 'qFox'), + ('rBar', None, 'qBar'), + ('rBaz', None, 'qBaz')] + + def test_lookup(self): + self.assertListEqual( + self.e.lookup(self.table, 'eFoo', 'rFoo', None), + ['qFoo', 'qFox'], + ) + self.assertListEqual( + self.e.lookup(self.table, 'eMoz', 'rMoz', 'DEFAULT'), + [], + ) + self.assertListEqual( + self.e.lookup(self.table, 'eBar', 'rBar', None), + ['qBar'], + ) + + +class test_Fanout(ExchangeCase): + type = exchange.FanoutExchange + table = [(None, None, 'qFoo'), + (None, None, 'qFox'), + (None, None, 'qBar')] + + def test_lookup(self): + self.assertListEqual( + self.e.lookup(self.table, 'eFoo', 'rFoo', None), + ['qFoo', 'qFox', 'qBar'], + ) + + def test_deliver_when_fanout_supported(self): + self.e.channel = Mock() + self.e.channel.supports_fanout = True + message = Mock() + + self.e.deliver(message, 'exchange', 'rkey') + self.e.channel._put_fanout.assert_called_with( + 'exchange', message, 'rkey', + ) + + def test_deliver_when_fanout_unsupported(self): + self.e.channel = Mock() + self.e.channel.supports_fanout = False + + self.e.deliver(Mock(), 'exchange', None) + self.assertFalse(self.e.channel._put_fanout.called) + + +class test_Topic(ExchangeCase): + type = exchange.TopicExchange + table = [ + ('stock.#', None, 'rFoo'), + ('stock.us.*', None, 'rBar'), + ] + + def setUp(self): + super(test_Topic, self).setUp() + self.table = [(rkey, self.e.key_to_pattern(rkey), queue) + for rkey, _, queue in self.table] + + def test_prepare_bind(self): + x = self.e.prepare_bind('qFoo', 'eFoo', 'stock.#', {}) + self.assertTupleEqual(x, ('stock.#', r'^stock\..*?$', 'qFoo')) + + def test_lookup(self): + self.assertListEqual( + self.e.lookup(self.table, 'eFoo', 'stock.us.nasdaq', None), + ['rFoo', 'rBar'], + ) + self.assertTrue(self.e._compiled) + self.assertListEqual( + self.e.lookup(self.table, 'eFoo', 'stock.europe.OSE', None), + ['rFoo'], + ) + self.assertListEqual( + self.e.lookup(self.table, 'eFoo', 'stockxeuropexOSE', None), + [], + ) + self.assertListEqual( + self.e.lookup(self.table, 'eFoo', + 'candy.schleckpulver.snap_crackle', None), + [], + ) + + def test_deliver(self): + self.e.channel = Mock() + self.e.channel._lookup.return_value = ('a', 'b') + message = Mock() + self.e.deliver(message, 'exchange', 'rkey') + + expected = [(('a', message), {}), + (('b', message), {})] + self.assertListEqual(self.e.channel._put.call_args_list, 
expected) + + +class test_ExchangeType(ExchangeCase): + type = exchange.ExchangeType + + def test_lookup(self): + with self.assertRaises(NotImplementedError): + self.e.lookup([], 'eFoo', 'rFoo', None) + + def test_prepare_bind(self): + self.assertTupleEqual( + self.e.prepare_bind('qFoo', 'eFoo', 'rFoo', {}), + ('rFoo', None, 'qFoo'), + ) + + def test_equivalent(self): + e1 = dict( + type='direct', + durable=True, + auto_delete=True, + arguments={}, + ) + self.assertTrue( + self.e.equivalent(e1, 'eFoo', 'direct', True, True, {}), + ) + self.assertFalse( + self.e.equivalent(e1, 'eFoo', 'topic', True, True, {}), + ) + self.assertFalse( + self.e.equivalent(e1, 'eFoo', 'direct', False, True, {}), + ) + self.assertFalse( + self.e.equivalent(e1, 'eFoo', 'direct', True, False, {}), + ) + self.assertFalse( + self.e.equivalent(e1, 'eFoo', 'direct', True, True, + {'expires': 3000}), + ) + e2 = dict(e1, arguments={'expires': 3000}) + self.assertTrue( + self.e.equivalent(e2, 'eFoo', 'direct', True, True, + {'expires': 3000}), + ) + self.assertFalse( + self.e.equivalent(e2, 'eFoo', 'direct', True, True, + {'expires': 6000}), + ) diff --git a/kombu/tests/transport/virtual/test_scheduling.py b/kombu/tests/transport/virtual/test_scheduling.py new file mode 100644 index 0000000..ccd7d4e --- /dev/null +++ b/kombu/tests/transport/virtual/test_scheduling.py @@ -0,0 +1,67 @@ +from __future__ import absolute_import + +from kombu.transport.virtual.scheduling import FairCycle + +from kombu.tests.case import Case + + +class MyEmpty(Exception): + pass + + +def consume(fun, n): + r = [] + for i in range(n): + r.append(fun()) + return r + + +class test_FairCycle(Case): + + def test_cycle(self): + resources = ['a', 'b', 'c', 'd', 'e'] + + def echo(r, timeout=None): + return r + + # cycle should be ['a', 'b', 'c', 'd', 'e', ... 
repeat] + cycle = FairCycle(echo, resources, MyEmpty) + for i in range(len(resources)): + self.assertEqual(cycle.get(), (resources[i], + resources[i])) + for i in range(len(resources)): + self.assertEqual(cycle.get(), (resources[i], + resources[i])) + + def test_cycle_breaks(self): + resources = ['a', 'b', 'c', 'd', 'e'] + + def echo(r): + if r == 'c': + raise MyEmpty(r) + return r + + cycle = FairCycle(echo, resources, MyEmpty) + self.assertEqual( + consume(cycle.get, len(resources)), + [('a', 'a'), ('b', 'b'), ('d', 'd'), + ('e', 'e'), ('a', 'a')], + ) + self.assertEqual( + consume(cycle.get, len(resources)), + [('b', 'b'), ('d', 'd'), ('e', 'e'), + ('a', 'a'), ('b', 'b')], + ) + cycle2 = FairCycle(echo, ['c', 'c'], MyEmpty) + with self.assertRaises(MyEmpty): + consume(cycle2.get, 3) + + def test_cycle_no_resources(self): + cycle = FairCycle(None, [], MyEmpty) + cycle.pos = 10 + + with self.assertRaises(MyEmpty): + cycle._next() + + def test__repr__(self): + self.assertTrue(repr(FairCycle(lambda x: x, [1, 2, 3], MyEmpty))) diff --git a/kombu/tests/utils/__init__.py b/kombu/tests/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kombu/tests/utils/test_amq_manager.py b/kombu/tests/utils/test_amq_manager.py new file mode 100644 index 0000000..b34b52e --- /dev/null +++ b/kombu/tests/utils/test_amq_manager.py @@ -0,0 +1,36 @@ +from __future__ import absolute_import + +from kombu import Connection + +from kombu.tests.case import Case, mask_modules, module_exists, patch + + +class test_get_manager(Case): + + @mask_modules('pyrabbit') + def test_without_pyrabbit(self): + with self.assertRaises(ImportError): + Connection('amqp://').get_manager() + + @module_exists('pyrabbit') + def test_with_pyrabbit(self): + with patch('pyrabbit.Client', create=True) as Client: + manager = Connection('amqp://').get_manager() + self.assertIsNotNone(manager) + Client.assert_called_with( + 'localhost:15672', 'guest', 'guest', + ) + + @module_exists('pyrabbit') + def test_transport_options(self): + with patch('pyrabbit.Client', create=True) as Client: + manager = Connection('amqp://', transport_options={ + 'manager_hostname': 'admin.mq.vandelay.com', + 'manager_port': 808, + 'manager_userid': 'george', + 'manager_password': 'bosco', + }).get_manager() + self.assertIsNotNone(manager) + Client.assert_called_with( + 'admin.mq.vandelay.com:808', 'george', 'bosco', + ) diff --git a/kombu/tests/utils/test_debug.py b/kombu/tests/utils/test_debug.py new file mode 100644 index 0000000..ea25cb7 --- /dev/null +++ b/kombu/tests/utils/test_debug.py @@ -0,0 +1,56 @@ +from __future__ import absolute_import + +import logging + +from kombu.utils.debug import ( + setup_logging, + Logwrapped, +) +from kombu.tests.case import Case, Mock, patch + + +class test_setup_logging(Case): + + def test_adds_handlers_sets_level(self): + with patch('kombu.utils.debug.get_logger') as get_logger: + logger = get_logger.return_value = Mock() + setup_logging(loggers=['kombu.test']) + + get_logger.assert_called_with('kombu.test') + + self.assertTrue(logger.addHandler.called) + logger.setLevel.assert_called_with(logging.DEBUG) + + +class test_Logwrapped(Case): + + def test_wraps(self): + with patch('kombu.utils.debug.get_logger') as get_logger: + logger = get_logger.return_value = Mock() + + W = Logwrapped(Mock(), 'kombu.test') + get_logger.assert_called_with('kombu.test') + self.assertIsNotNone(W.instance) + self.assertIs(W.logger, logger) + + W.instance.__repr__ = lambda s: 'foo' + self.assertEqual(repr(W), 'foo') + 
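+            # Logwrapped proxies attribute access and method calls to
+            # the wrapped instance; once 'ident' is set (further down),
+            # each call is also logged at DEBUG level.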
W.instance.some_attr = 303 + self.assertEqual(W.some_attr, 303) + + W.instance.some_method.__name__ = 'some_method' + W.some_method(1, 2, kw=1) + W.instance.some_method.assert_called_with(1, 2, kw=1) + + W.some_method() + W.instance.some_method.assert_called_with() + + W.some_method(kw=1) + W.instance.some_method.assert_called_with(kw=1) + + W.ident = 'ident' + W.some_method(kw=1) + self.assertTrue(logger.debug.called) + self.assertIn('ident', logger.debug.call_args[0][0]) + + self.assertEqual(dir(W), dir(W.instance)) diff --git a/kombu/tests/utils/test_encoding.py b/kombu/tests/utils/test_encoding.py new file mode 100644 index 0000000..fd710c3 --- /dev/null +++ b/kombu/tests/utils/test_encoding.py @@ -0,0 +1,102 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +from __future__ import unicode_literals + +import sys + +from contextlib import contextmanager + +from kombu.five import bytes_t, string_t +from kombu.utils.encoding import safe_str, default_encoding + +from kombu.tests.case import Case, SkipTest, patch + + +@contextmanager +def clean_encoding(): + old_encoding = sys.modules.pop('kombu.utils.encoding', None) + import kombu.utils.encoding + try: + yield kombu.utils.encoding + finally: + if old_encoding: + sys.modules['kombu.utils.encoding'] = old_encoding + + +class test_default_encoding(Case): + + @patch('sys.getfilesystemencoding') + def test_default(self, getdefaultencoding): + getdefaultencoding.return_value = 'ascii' + with clean_encoding() as encoding: + enc = encoding.default_encoding() + if sys.platform.startswith('java'): + self.assertEqual(enc, 'utf-8') + else: + self.assertEqual(enc, 'ascii') + getdefaultencoding.assert_called_with() + + +class test_encoding_utils(Case): + + def setUp(self): + if sys.version_info >= (3, 0): + raise SkipTest('not relevant on py3k') + + def test_str_to_bytes(self): + with clean_encoding() as e: + self.assertIsInstance(e.str_to_bytes('foobar'), bytes_t) + + def test_from_utf8(self): + with clean_encoding() as e: + self.assertIsInstance(e.from_utf8('foobar'), bytes_t) + + def test_default_encode(self): + with clean_encoding() as e: + self.assertTrue(e.default_encode(b'foo')) + + +class test_safe_str(Case): + + def setUp(self): + self._cencoding = patch('sys.getfilesystemencoding') + self._encoding = self._cencoding.__enter__() + self._encoding.return_value = 'ascii' + + def tearDown(self): + self._cencoding.__exit__() + + def test_when_bytes(self): + self.assertEqual(safe_str('foo'), 'foo') + + def test_when_unicode(self): + self.assertIsInstance(safe_str('foo'), string_t) + + def test_when_encoding_utf8(self): + with patch('sys.getfilesystemencoding') as encoding: + encoding.return_value = 'utf-8' + self.assertEqual(default_encoding(), 'utf-8') + s = 'The quiæk fåx jømps øver the lazy dåg' + res = safe_str(s) + self.assertIsInstance(res, str) + + def test_when_containing_high_chars(self): + with patch('sys.getfilesystemencoding') as encoding: + encoding.return_value = 'ascii' + s = 'The quiæk fåx jømps øver the lazy dåg' + res = safe_str(s) + self.assertIsInstance(res, str) + self.assertEqual(len(s), len(res)) + + def test_when_not_string(self): + o = object() + self.assertEqual(safe_str(o), repr(o)) + + def test_when_unrepresentable(self): + + class O(object): + + def __repr__(self): + raise KeyError('foo') + + self.assertIn('<Unrepresentable', safe_str(O())) diff --git a/kombu/tests/utils/test_utils.py b/kombu/tests/utils/test_utils.py new file mode 100644 --- /dev/null +++ b/kombu/tests/utils/test_utils.py +from __future__ import absolute_import + +import pickle +import sys + +from functools import wraps + +if sys.version_info >= (3, 0): + from io import StringIO, BytesIO +else: + from StringIO import StringIO, StringIO as BytesIO # noqa + +from kombu import version_info_t +from kombu import utils +from kombu.utils.text 
import version_string_as_tuple +from kombu.five import string_t + +from kombu.tests.case import ( + Case, Mock, patch, + redirect_stdouts, mask_modules, module_exists, skip_if_module, +) + + +class OldString(object): + + def __init__(self, value): + self.value = value + + def __str__(self): + return self.value + + def split(self, *args, **kwargs): + return self.value.split(*args, **kwargs) + + def rsplit(self, *args, **kwargs): + return self.value.rsplit(*args, **kwargs) + + +class test_kombu_module(Case): + + def test_dir(self): + import kombu + self.assertTrue(dir(kombu)) + + +class test_utils(Case): + + def test_maybe_list(self): + self.assertEqual(utils.maybe_list(None), []) + self.assertEqual(utils.maybe_list(1), [1]) + self.assertEqual(utils.maybe_list([1, 2, 3]), [1, 2, 3]) + + def test_fxrange_no_repeatlast(self): + self.assertEqual(list(utils.fxrange(1.0, 3.0, 1.0)), + [1.0, 2.0, 3.0]) + + def test_fxrangemax(self): + self.assertEqual(list(utils.fxrangemax(1.0, 3.0, 1.0, 30.0)), + [1.0, 2.0, 3.0, 3.0, 3.0, 3.0, + 3.0, 3.0, 3.0, 3.0, 3.0]) + self.assertEqual(list(utils.fxrangemax(1.0, None, 1.0, 30.0)), + [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]) + + def test_reprkwargs(self): + self.assertTrue(utils.reprkwargs({'foo': 'bar', 1: 2, 'k': 'v'})) + + def test_reprcall(self): + self.assertTrue( + utils.reprcall('add', (2, 2), {'copy': True}), + ) + + +class test_UUID(Case): + + def test_uuid4(self): + self.assertNotEqual(utils.uuid4(), + utils.uuid4()) + + def test_uuid(self): + i1 = utils.uuid() + i2 = utils.uuid() + self.assertIsInstance(i1, str) + self.assertNotEqual(i1, i2) + + @skip_if_module('__pypy__') + def test_uuid_without_ctypes(self): + old_utils = sys.modules.pop('kombu.utils') + + @mask_modules('ctypes') + def with_ctypes_masked(): + from kombu.utils import ctypes, uuid + + self.assertIsNone(ctypes) + tid = uuid() + self.assertTrue(tid) + self.assertIsInstance(tid, string_t) + + try: + with_ctypes_masked() + finally: + sys.modules['kombu.utils'] = old_utils + + +class test_Misc(Case): + + def test_kwdict(self): + + def f(**kwargs): + return kwargs + + kw = {'foo': 'foo', + 'bar': 'bar'} + self.assertTrue(f(**utils.kwdict(kw))) + + +class MyStringIO(StringIO): + + def close(self): + pass + + +class MyBytesIO(BytesIO): + + def close(self): + pass + + +class test_emergency_dump_state(Case): + + @redirect_stdouts + def test_dump(self, stdout, stderr): + fh = MyBytesIO() + + utils.emergency_dump_state({'foo': 'bar'}, open_file=lambda n, m: fh) + self.assertDictEqual(pickle.loads(fh.getvalue()), {'foo': 'bar'}) + self.assertTrue(stderr.getvalue()) + self.assertFalse(stdout.getvalue()) + + @redirect_stdouts + def test_dump_second_strategy(self, stdout, stderr): + fh = MyStringIO() + + def raise_something(*args, **kwargs): + raise KeyError('foo') + + utils.emergency_dump_state( + {'foo': 'bar'}, + open_file=lambda n, m: fh, dump=raise_something + ) + self.assertIn('foo', fh.getvalue()) + self.assertIn('bar', fh.getvalue()) + self.assertTrue(stderr.getvalue()) + self.assertFalse(stdout.getvalue()) + + +def insomnia(fun): + + @wraps(fun) + def _inner(*args, **kwargs): + def mysleep(i): + pass + + prev_sleep = utils.sleep + utils.sleep = mysleep + try: + return fun(*args, **kwargs) + finally: + utils.sleep = prev_sleep + + return _inner + + +class test_retry_over_time(Case): + + def setUp(self): + self.index = 0 + + class Predicate(Exception): + pass + + def myfun(self): + if self.index < 9: + raise self.Predicate() + return 42 + + def errback(self, exc, intervals, retries): + interval 
= next(intervals) + sleepvals = (None, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 16.0) + self.index += 1 + self.assertEqual(interval, sleepvals[self.index]) + return interval + + @insomnia + def test_simple(self): + prev_count, utils.count = utils.count, Mock() + try: + utils.count.return_value = list(range(1)) + x = utils.retry_over_time(self.myfun, self.Predicate, + errback=None, interval_max=14) + self.assertIsNone(x) + utils.count.return_value = list(range(10)) + cb = Mock() + x = utils.retry_over_time(self.myfun, self.Predicate, + errback=self.errback, callback=cb, + interval_max=14) + self.assertEqual(x, 42) + self.assertEqual(self.index, 9) + cb.assert_called_with() + finally: + utils.count = prev_count + + @insomnia + def test_retry_once(self): + with self.assertRaises(self.Predicate): + utils.retry_over_time( + self.myfun, self.Predicate, + max_retries=1, errback=self.errback, interval_max=14, + ) + self.assertEqual(self.index, 1) + # no errback + with self.assertRaises(self.Predicate): + utils.retry_over_time( + self.myfun, self.Predicate, + max_retries=1, errback=None, interval_max=14, + ) + + @insomnia + def test_retry_always(self): + Predicate = self.Predicate + + class Fun(object): + + def __init__(self): + self.calls = 0 + + def __call__(self, *args, **kwargs): + try: + if self.calls >= 10: + return 42 + raise Predicate() + finally: + self.calls += 1 + fun = Fun() + + self.assertEqual( + utils.retry_over_time( + fun, self.Predicate, + max_retries=0, errback=None, interval_max=14, + ), + 42, + ) + self.assertEqual(fun.calls, 11) + + +class test_cached_property(Case): + + def test_deleting(self): + + class X(object): + xx = False + + @utils.cached_property + def foo(self): + return 42 + + @foo.deleter # noqa + def foo(self, value): + self.xx = value + + x = X() + del(x.foo) + self.assertFalse(x.xx) + x.__dict__['foo'] = 'here' + del(x.foo) + self.assertEqual(x.xx, 'here') + + def test_when_access_from_class(self): + + class X(object): + xx = None + + @utils.cached_property + def foo(self): + return 42 + + @foo.setter # noqa + def foo(self, value): + self.xx = 10 + + desc = X.__dict__['foo'] + self.assertIs(X.foo, desc) + + self.assertIs(desc.__get__(None), desc) + self.assertIs(desc.__set__(None, 1), desc) + self.assertIs(desc.__delete__(None), desc) + self.assertTrue(desc.setter(1)) + + x = X() + x.foo = 30 + self.assertEqual(x.xx, 10) + + del(x.foo) + + +class test_symbol_by_name(Case): + + def test_instance_returns_instance(self): + instance = object() + self.assertIs(utils.symbol_by_name(instance), instance) + + def test_returns_default(self): + default = object() + self.assertIs( + utils.symbol_by_name('xyz.ryx.qedoa.weq:foz', default=default), + default, + ) + + def test_no_default(self): + with self.assertRaises(ImportError): + utils.symbol_by_name('xyz.ryx.qedoa.weq:foz') + + def test_imp_reraises_ValueError(self): + imp = Mock() + imp.side_effect = ValueError() + with self.assertRaises(ValueError): + utils.symbol_by_name('kombu.Connection', imp=imp) + + def test_package(self): + from kombu.entity import Exchange + self.assertIs( + utils.symbol_by_name('.entity:Exchange', package='kombu'), + Exchange, + ) + self.assertTrue(utils.symbol_by_name(':Consumer', package='kombu')) + + +class test_ChannelPromise(Case): + + def test_repr(self): + obj = Mock(name='cb') + self.assertIn( + 'promise', + repr(utils.ChannelPromise(obj)), + ) + self.assertFalse(obj.called) + + +class test_entrypoints(Case): + + @mask_modules('pkg_resources') + def test_without_pkg_resources(self): + 
self.assertListEqual(list(utils.entrypoints('kombu.test')), []) + + @module_exists('pkg_resources') + def test_with_pkg_resources(self): + with patch('pkg_resources.iter_entry_points', create=True) as iterep: + eps = iterep.return_value = [Mock(), Mock()] + + self.assertTrue(list(utils.entrypoints('kombu.test'))) + iterep.assert_called_with('kombu.test') + eps[0].load.assert_called_with() + eps[1].load.assert_called_with() + + +class test_shufflecycle(Case): + + def test_shuffles(self): + prev_repeat, utils.repeat = utils.repeat, Mock() + try: + utils.repeat.return_value = list(range(10)) + values = set(['A', 'B', 'C']) + cycle = utils.shufflecycle(values) + seen = set() + for i in range(10): + next(cycle) + utils.repeat.assert_called_with(None) + self.assertTrue(seen.issubset(values)) + with self.assertRaises(StopIteration): + next(cycle) + next(cycle) + finally: + utils.repeat = prev_repeat + + +class test_version_string_as_tuple(Case): + + def test_versions(self): + self.assertTupleEqual( + version_string_as_tuple('3'), + version_info_t(3, 0, 0, '', ''), + ) + self.assertTupleEqual( + version_string_as_tuple('3.3'), + version_info_t(3, 3, 0, '', ''), + ) + self.assertTupleEqual( + version_string_as_tuple('3.3.1'), + version_info_t(3, 3, 1, '', ''), + ) + self.assertTupleEqual( + version_string_as_tuple('3.3.1a3'), + version_info_t(3, 3, 1, 'a3', ''), + ) + self.assertTupleEqual( + version_string_as_tuple('3.3.1a3-40c32'), + version_info_t(3, 3, 1, 'a3', '40c32'), + ) + self.assertEqual( + version_string_as_tuple('3.3.1.a3.40c32'), + version_info_t(3, 3, 1, 'a3', '40c32'), + ) diff --git a/kombu/transport/SLMQ.py b/kombu/transport/SLMQ.py new file mode 100644 index 0000000..449bc2f --- /dev/null +++ b/kombu/transport/SLMQ.py @@ -0,0 +1,186 @@ +""" +kombu.transport.SLMQ +==================== + +SoftLayer Message Queue transport. + +""" +from __future__ import absolute_import + +import os +import socket +import string + +from anyjson import loads, dumps + +from kombu.five import Empty, text_t +from kombu.utils import cached_property # , uuid +from kombu.utils.encoding import bytes_to_str, safe_str + +from . import virtual + +try: + from softlayer_messaging import get_client + from softlayer_messaging.errors import ResponseError +except ImportError: # pragma: no cover + get_client = ResponseError = None # noqa + +# all punctuation (other than underscore) is replaced by underscore. +CHARS_REPLACE_TABLE = dict( + (ord(c), 0x5f) for c in string.punctuation if c not in '_') + + +class Channel(virtual.Channel): + default_visibility_timeout = 1800 # 30 minutes. 
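+    # Connection sketch (illustrative only; the URL and option values
+    # here are placeholders, not defaults):
+    #
+    #     from kombu import Connection
+    #     conn = Connection('slmq://', transport_options={
+    #         'visibility_timeout': 900,      # overrides the 1800s default
+    #         'queue_name_prefix': 'myapp-',
+    #     })
+    #
+    # The account, user and API key are taken from the connection URL, or
+    # from the SLMQ_ACCOUNT, SL_USERNAME, SL_API_KEY, SLMQ_HOST and
+    # SLMQ_PORT environment variables (see the ``slmq`` property below).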
+ domain_format = 'kombu%(vhost)s' + _slmq = None + _queue_cache = {} + _noack_queues = set() + + def __init__(self, *args, **kwargs): + if get_client is None: + raise ImportError( + 'SLMQ transport requires the softlayer_messaging library', + ) + super(Channel, self).__init__(*args, **kwargs) + queues = self.slmq.queues() + for queue in queues: + self._queue_cache[queue] = queue + + def basic_consume(self, queue, no_ack, *args, **kwargs): + if no_ack: + self._noack_queues.add(queue) + return super(Channel, self).basic_consume(queue, no_ack, + *args, **kwargs) + + def basic_cancel(self, consumer_tag): + if consumer_tag in self._consumers: + queue = self._tag_to_queue[consumer_tag] + self._noack_queues.discard(queue) + return super(Channel, self).basic_cancel(consumer_tag) + + def entity_name(self, name, table=CHARS_REPLACE_TABLE): + """Format AMQP queue name into a valid SLMQ queue name.""" + return text_t(safe_str(name)).translate(table) + + def _new_queue(self, queue, **kwargs): + """Ensure a queue exists in SLMQ.""" + queue = self.entity_name(self.queue_name_prefix + queue) + try: + return self._queue_cache[queue] + except KeyError: + try: + self.slmq.create_queue( + queue, visibility_timeout=self.visibility_timeout) + except ResponseError: + pass + q = self._queue_cache[queue] = self.slmq.queue(queue) + return q + + def _delete(self, queue, *args): + """Delete queue by name.""" + queue_name = self.entity_name(queue) + self._queue_cache.pop(queue_name, None) + self.slmq.queue(queue_name).delete(force=True) + super(Channel, self)._delete(queue_name) + + def _put(self, queue, message, **kwargs): + """Put message onto queue.""" + q = self._new_queue(queue) + q.push(dumps(message)) + + def _get(self, queue): + """Try to retrieve a single message off ``queue``.""" + q = self._new_queue(queue) + rs = q.pop(1) + if rs['items']: + m = rs['items'][0] + payload = loads(bytes_to_str(m['body'])) + if queue in self._noack_queues: + q.message(m['id']).delete() + else: + payload['properties']['delivery_info'].update({ + 'slmq_message_id': m['id'], 'slmq_queue_name': q.name}) + return payload + raise Empty() + + def basic_ack(self, delivery_tag): + delivery_info = self.qos.get(delivery_tag).delivery_info + try: + queue = delivery_info['slmq_queue_name'] + except KeyError: + pass + else: + self.delete_message(queue, delivery_info['slmq_message_id']) + super(Channel, self).basic_ack(delivery_tag) + + def _size(self, queue): + """Return the number of messages in a queue.""" + return self._new_queue(queue).detail()['message_count'] + + def _purge(self, queue): + """Delete all current messages in a queue.""" + q = self._new_queue(queue) + n = 0 + l = q.pop(10) + while l['items']: + for m in l['items']: + self.delete_message(queue, m['id']) + n += 1 + l = q.pop(10) + return n + + def delete_message(self, queue, message_id): + q = self.slmq.queue(self.entity_name(queue)) + return q.message(message_id).delete() + + @property + def slmq(self): + if self._slmq is None: + conninfo = self.conninfo + account = os.environ.get('SLMQ_ACCOUNT', conninfo.virtual_host) + user = os.environ.get('SL_USERNAME', conninfo.userid) + api_key = os.environ.get('SL_API_KEY', conninfo.password) + host = os.environ.get('SLMQ_HOST', conninfo.hostname) + port = os.environ.get('SLMQ_PORT', conninfo.port) + secure = bool(os.environ.get( + 'SLMQ_SECURE', self.transport_options.get('secure', True))) + endpoint = '{0}://{1}{2}'.format( + 'https' if secure else 'http', host, + ':{0}'.format(port) if port else '', + ) + + self._slmq = 
get_client(account, endpoint=endpoint) + self._slmq.authenticate(user, api_key) + return self._slmq + + @property + def conninfo(self): + return self.connection.client + + @property + def transport_options(self): + return self.connection.client.transport_options + + @cached_property + def visibility_timeout(self): + return (self.transport_options.get('visibility_timeout') or + self.default_visibility_timeout) + + @cached_property + def queue_name_prefix(self): + return self.transport_options.get('queue_name_prefix', '') + + +class Transport(virtual.Transport): + Channel = Channel + + polling_interval = 1 + default_port = None + connection_errors = ( + virtual.Transport.connection_errors + ( + ResponseError, socket.error + ) + ) diff --git a/kombu/transport/SQS.py b/kombu/transport/SQS.py new file mode 100644 index 0000000..68cb053 --- /dev/null +++ b/kombu/transport/SQS.py @@ -0,0 +1,539 @@ +""" +kombu.transport.SQS +=================== + +Amazon SQS transport module for Kombu. This package implements an AMQP-like +interface on top of Amazon's SQS service, with the goal of being optimized for +high performance and reliability. + +The default settings for this module are currently focused on high performance +in task-queue situations where tasks are small, idempotent and run very fast. + +SQS Features supported by this transport: + Long Polling: + http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/ + sqs-long-polling.html + + Long polling is enabled by setting the `wait_time_seconds` transport + option to a number > 1. Amazon supports up to 20 seconds. This is + disabled for now, but will be enabled by default in the near future. + + Batch API Actions: + http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/ + sqs-batch-api.html + + The default behavior of the SQS Channel.drain_events() method is to + request up to 'prefetch_count' messages on every request to SQS. + These messages are stored locally in a deque object and passed back + to the Transport until the deque is empty, before triggering a new + API call to Amazon. + + This behavior dramatically speeds up the rate at which you can pull tasks + from SQS when you have short-running tasks (or a large number of workers). + + When a Celery worker has multiple queues to monitor, it will pull down + up to 'prefetch_count' messages from queueA and work on them all before + moving on to queueB. If queueB is empty, it will wait until + 'polling_interval' expires before moving back and checking on queueA. +""" + +from __future__ import absolute_import + +import collections +import socket +import string + +from anyjson import loads, dumps + +import boto +from boto import exception +from boto import sdb as _sdb +from boto import sqs as _sqs +from boto.sdb.domain import Domain +from boto.sdb.connection import SDBConnection +from boto.sqs.connection import SQSConnection +from boto.sqs.message import Message + +from kombu.five import Empty, range, text_t +from kombu.log import get_logger +from kombu.utils import cached_property, uuid +from kombu.utils.encoding import bytes_to_str, safe_str +from kombu.transport.virtual import scheduling + +from . import virtual + +logger = get_logger(__name__) + +# dots are replaced by dash, all other punctuation +# replaced by underscore. +CHARS_REPLACE_TABLE = dict((ord(c), 0x5f) + for c in string.punctuation if c not in '-_.') +CHARS_REPLACE_TABLE[0x2e] = 0x2d # '.' 
-> '-' + + +def maybe_int(x): + try: + return int(x) + except ValueError: + return x +BOTO_VERSION = tuple(maybe_int(part) for part in boto.__version__.split('.')) +W_LONG_POLLING = BOTO_VERSION >= (2, 8) + +#: SQS bulk get supports a maximum of 10 messages at a time. +SQS_MAX_MESSAGES = 10 + + +class Table(Domain): + """Amazon SimpleDB domain describing the message routing table.""" + # caches queues already bound, so we don't have to declare them again. + _already_bound = set() + + def routes_for(self, exchange): + """Iterator giving all routes for an exchange.""" + return self.select("""WHERE exchange = '%s'""" % exchange) + + def get_queue(self, queue): + """Get binding for queue.""" + qid = self._get_queue_id(queue) + if qid: + return self.get_item(qid) + + def create_binding(self, queue): + """Get binding item for queue. + + Creates the item if it doesn't exist. + + """ + item = self.get_queue(queue) + if item: + return item, item['id'] + id = uuid() + return self.new_item(id), id + + def queue_bind(self, exchange, routing_key, pattern, queue): + if queue not in self._already_bound: + binding, id = self.create_binding(queue) + binding.update(exchange=exchange, + routing_key=routing_key or '', + pattern=pattern or '', + queue=queue or '', + id=id) + binding.save() + self._already_bound.add(queue) + + def queue_delete(self, queue): + """delete queue by name.""" + self._already_bound.discard(queue) + item = self._get_queue_item(queue) + if item: + self.delete_item(item) + + def exchange_delete(self, exchange): + """Delete all routes for `exchange`.""" + for item in self.routes_for(exchange): + self.delete_item(item['id']) + + def get_item(self, item_name): + """Uses `consistent_read` by default.""" + # Domain is an old-style class, can't use super(). + for consistent_read in (False, True): + item = Domain.get_item(self, item_name, consistent_read) + if item: + return item + + def select(self, query='', next_token=None, + consistent_read=True, max_items=None): + """Uses `consistent_read` by default.""" + query = """SELECT * FROM `%s` %s""" % (self.name, query) + return Domain.select(self, query, next_token, + consistent_read, max_items) + + def _try_first(self, query='', **kwargs): + for c in (False, True): + for item in self.select(query, consistent_read=c, **kwargs): + return item + + def get_exchanges(self): + return list(set(i['exchange'] for i in self.select())) + + def _get_queue_item(self, queue): + return self._try_first("""WHERE queue = '%s' limit 1""" % queue) + + def _get_queue_id(self, queue): + item = self._get_queue_item(queue) + if item: + return item['id'] + + +class Channel(virtual.Channel): + Table = Table + + default_region = 'us-east-1' + default_visibility_timeout = 1800 # 30 minutes. + default_wait_time_seconds = 0 # disabled see #198 + domain_format = 'kombu%(vhost)s' + _sdb = None + _sqs = None + _queue_cache = {} + _noack_queues = set() + + def __init__(self, *args, **kwargs): + super(Channel, self).__init__(*args, **kwargs) + + # SQS blows up when you try to create a new queue if one already + # exists with a different visibility_timeout, so this prepopulates + # the queue_cache to protect us from recreating + # queues that are known to already exist. + queues = self.sqs.get_all_queues(prefix=self.queue_name_prefix) + for queue in queues: + self._queue_cache[queue.name] = queue + self._fanout_queues = set() + + # The drain_events() method stores extra messages in a local + # Deque object. 
This allows multiple messages to be requested from + # SQS at once for performance, but maintains the same external API + # to the caller of the drain_events() method. + self._queue_message_cache = collections.deque() + + def basic_consume(self, queue, no_ack, *args, **kwargs): + if no_ack: + self._noack_queues.add(queue) + return super(Channel, self).basic_consume( + queue, no_ack, *args, **kwargs + ) + + def basic_cancel(self, consumer_tag): + if consumer_tag in self._consumers: + queue = self._tag_to_queue[consumer_tag] + self._noack_queues.discard(queue) + return super(Channel, self).basic_cancel(consumer_tag) + + def drain_events(self, timeout=None): + """Return a single payload message from one of our queues. + + :raises Empty: if no messages available. + + """ + # If we're not allowed to consume or have no consumers, raise Empty + if not self._consumers or not self.qos.can_consume(): + raise Empty() + message_cache = self._queue_message_cache + + # Check if there are any items in our buffer. If there are any, pop + # off that queue first. + try: + return message_cache.popleft() + except IndexError: + pass + + # At this point, go and get more messages from SQS + res, queue = self._poll(self.cycle, timeout=timeout) + message_cache.extend((r, queue) for r in res) + + # Now try to pop off the queue again. + try: + return message_cache.popleft() + except IndexError: + raise Empty() + + def _reset_cycle(self): + """Reset the consume cycle. + + :returns: a FairCycle object that points to our _get_bulk() method + rather than the standard _get() method. This allows for multiple + messages to be returned at once from SQS (based on the prefetch + limit). + + """ + self._cycle = scheduling.FairCycle( + self._get_bulk, self._active_queues, Empty, + ) + + def entity_name(self, name, table=CHARS_REPLACE_TABLE): + """Format AMQP queue name into a legal SQS queue name.""" + return text_t(safe_str(name)).translate(table) + + def _new_queue(self, queue, **kwargs): + """Ensure a queue with given name exists in SQS.""" + # Translate to SQS name for consistency with initial + # _queue_cache population. + queue = self.entity_name(self.queue_name_prefix + queue) + try: + return self._queue_cache[queue] + except KeyError: + q = self._queue_cache[queue] = self.sqs.create_queue( + queue, self.visibility_timeout, + ) + return q + + def queue_bind(self, queue, exchange=None, routing_key='', + arguments=None, **kwargs): + super(Channel, self).queue_bind(queue, exchange, routing_key, + arguments, **kwargs) + if self.typeof(exchange).type == 'fanout': + self._fanout_queues.add(queue) + + def _queue_bind(self, *args): + """Bind ``queue`` to ``exchange`` with routing key. + + Route will be stored in SDB if so enabled. + + """ + if self.supports_fanout: + self.table.queue_bind(*args) + + def get_table(self, exchange): + """Get routing table. + + Retrieved from SDB if :attr:`supports_fanout`. 
+ + """ + if self.supports_fanout: + return [(r['routing_key'], r['pattern'], r['queue']) + for r in self.table.routes_for(exchange)] + return super(Channel, self).get_table(exchange) + + def get_exchanges(self): + if self.supports_fanout: + return self.table.get_exchanges() + return super(Channel, self).get_exchanges() + + def _delete(self, queue, *args): + """delete queue by name.""" + if self.supports_fanout: + self.table.queue_delete(queue) + super(Channel, self)._delete(queue) + self._queue_cache.pop(queue, None) + + def exchange_delete(self, exchange, **kwargs): + """Delete exchange by name.""" + if self.supports_fanout: + self.table.exchange_delete(exchange) + super(Channel, self).exchange_delete(exchange, **kwargs) + + def _has_queue(self, queue, **kwargs): + """Return True if ``queue`` was previously declared.""" + if self.supports_fanout: + return bool(self.table.get_queue(queue)) + return super(Channel, self)._has_queue(queue) + + def _put(self, queue, message, **kwargs): + """Put message onto queue.""" + q = self._new_queue(queue) + m = Message() + m.set_body(dumps(message)) + q.write(m) + + def _put_fanout(self, exchange, message, routing_key, **kwargs): + """Deliver fanout message to all queues in ``exchange``.""" + for route in self.table.routes_for(exchange): + self._put(route['queue'], message, **kwargs) + + def _get_from_sqs(self, queue, count=1): + """Retrieve messages from SQS and returns the raw SQS message objects. + + :returns: List of SQS message objects + + """ + q = self._new_queue(queue) + if W_LONG_POLLING and queue not in self._fanout_queues: + return q.get_messages( + count, wait_time_seconds=self.wait_time_seconds, + ) + else: # boto < 2.8 + return q.get_messages(count) + + def _message_to_python(self, message, queue_name, queue): + payload = loads(bytes_to_str(message.get_body())) + if queue_name in self._noack_queues: + queue.delete_message(message) + else: + payload['properties']['delivery_info'].update({ + 'sqs_message': message, 'sqs_queue': queue, + }) + return payload + + def _messages_to_python(self, messages, queue): + """Convert a list of SQS Message objects into Payloads. + + This method handles converting SQS Message objects into + Payloads, and appropriately updating the queue depending on + the 'ack' settings for that queue. + + :param messages: A list of SQS Message objects. + :param queue: String name representing the queue they came from + + :returns: A list of Payload objects + + """ + q = self._new_queue(queue) + return [self._message_to_python(m, queue, q) for m in messages] + + def _get_bulk(self, queue, max_if_unlimited=SQS_MAX_MESSAGES): + """Try to retrieve multiple messages off ``queue``. + + Where _get() returns a single Payload object, this method returns a + list of Payload objects. The number of objects returned is determined + by the total number of messages available in the queue and the + number of messages that the QoS object allows (based on the + prefetch_count). + + .. note:: + Ignores QoS limits so caller is responsible for checking + that we are allowed to consume at least one message from the + queue. get_bulk will then ask QoS for an estimate of + the number of extra messages that we can consume. + + args: + queue: The queue name (string) to pull from + + returns: + payloads: A list of payload objects returned + """ + # drain_events calls `can_consume` first, consuming + # a token, so we know that we are allowed to consume at least + # one message. 
+ maxcount = self.qos.can_consume_max_estimate() + maxcount = max_if_unlimited if maxcount is None else max(maxcount, 1) + if maxcount: + messages = self._get_from_sqs( + queue, count=min(maxcount, SQS_MAX_MESSAGES), + ) + + if messages: + return self._messages_to_python(messages, queue) + raise Empty() + + def _get(self, queue): + """Try to retrieve a single message off ``queue``.""" + messages = self._get_from_sqs(queue, count=1) + + if messages: + return self._messages_to_python(messages, queue)[0] + raise Empty() + + def _restore(self, message, + unwanted_delivery_info=('sqs_message', 'sqs_queue')): + for unwanted_key in unwanted_delivery_info: + # Remove objects that aren't JSON serializable (Issue #1108). + message.delivery_info.pop(unwanted_key, None) + return super(Channel, self)._restore(message) + + def basic_ack(self, delivery_tag): + delivery_info = self.qos.get(delivery_tag).delivery_info + try: + queue = delivery_info['sqs_queue'] + except KeyError: + pass + else: + queue.delete_message(delivery_info['sqs_message']) + super(Channel, self).basic_ack(delivery_tag) + + def _size(self, queue): + """Return the number of messages in a queue.""" + return self._new_queue(queue).count() + + def _purge(self, queue): + """Delete all current messages in a queue.""" + q = self._new_queue(queue) + # SQS is slow at registering messages, so run for a few + # iterations to ensure messages are deleted. + size = 0 + for i in range(10): + size += q.count() + if not size: + break + q.clear() + return size + + def close(self): + super(Channel, self).close() + for conn in (self._sqs, self._sdb): + if conn: + try: + conn.close() + except AttributeError as exc: # FIXME ??? + if "can't set attribute" not in str(exc): + raise + + def _get_regioninfo(self, regions): + if self.region: + for _r in regions: + if _r.name == self.region: + return _r + + def _aws_connect_to(self, fun, regions): + conninfo = self.conninfo + region = self._get_regioninfo(regions) + return fun(region=region, + aws_access_key_id=conninfo.userid, + aws_secret_access_key=conninfo.password, + port=conninfo.port) + + @property + def sqs(self): + if self._sqs is None: + self._sqs = self._aws_connect_to(SQSConnection, _sqs.regions()) + return self._sqs + + @property + def sdb(self): + if self._sdb is None: + self._sdb = self._aws_connect_to(SDBConnection, _sdb.regions()) + return self._sdb + + @property + def table(self): + name = self.entity_name( + self.domain_format % {'vhost': self.conninfo.virtual_host}) + d = self.sdb.get_object( + 'CreateDomain', {'DomainName': name}, self.Table) + d.name = name + return d + + @property + def conninfo(self): + return self.connection.client + + @property + def transport_options(self): + return self.connection.client.transport_options + + @cached_property + def visibility_timeout(self): + return (self.transport_options.get('visibility_timeout') or + self.default_visibility_timeout) + + @cached_property + def queue_name_prefix(self): + return self.transport_options.get('queue_name_prefix', '') + + @cached_property + def supports_fanout(self): + return self.transport_options.get('sdb_persistence', False) + + @cached_property + def region(self): + return self.transport_options.get('region') or self.default_region + + @cached_property + def wait_time_seconds(self): + return self.transport_options.get('wait_time_seconds', + self.default_wait_time_seconds) + + +class Transport(virtual.Transport): + Channel = Channel + + polling_interval = 1 + wait_time_seconds = 0 + default_port = None + 
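+    # Usage sketch (illustrative; the credentials and option values are
+    # placeholders, and any of the transport options may be omitted):
+    #
+    #     from kombu import Connection
+    #     conn = Connection('sqs://AWS_KEY_ID:AWS_SECRET@', transport_options={
+    #         'region': 'us-west-2',
+    #         'visibility_timeout': 3600,
+    #         'wait_time_seconds': 10,    # long polling (requires boto >= 2.8)
+    #         'queue_name_prefix': 'celery-',
+    #     })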
connection_errors = ( + virtual.Transport.connection_errors + + (exception.SQSError, socket.error) + ) + channel_errors = ( + virtual.Transport.channel_errors + (exception.SQSDecodeError, ) + ) + driver_type = 'sqs' + driver_name = 'sqs' diff --git a/kombu/transport/__init__.py b/kombu/transport/__init__.py new file mode 100644 index 0000000..10d62e9 --- /dev/null +++ b/kombu/transport/__init__.py @@ -0,0 +1,109 @@ +""" +kombu.transport +=============== + +Built-in transports. + +""" +from __future__ import absolute_import + +from kombu.five import string_t +from kombu.syn import _detect_environment +from kombu.utils import symbol_by_name + + +def supports_librabbitmq(): + if _detect_environment() == 'default': + try: + import librabbitmq # noqa + except ImportError: # pragma: no cover + pass + else: # pragma: no cover + return True + + +def _ghettoq(name, new, alias=None): + xxx = new # stupid enclosing + + def __inner(): + import warnings + _new = callable(xxx) and xxx() or xxx + gtransport = 'ghettoq.taproot.{0}'.format(name) + ktransport = 'kombu.transport.{0}.Transport'.format(_new) + this = alias or name + warnings.warn(""" + Ghettoq does not work with Kombu, but there is now a built-in version + of the {0} transport. + + You should replace {1!r} with: {2!r} + """.format(name, gtransport, this)) + return ktransport + + return __inner + + +TRANSPORT_ALIASES = { + 'amqp': 'kombu.transport.pyamqp:Transport', + 'pyamqp': 'kombu.transport.pyamqp:Transport', + 'librabbitmq': 'kombu.transport.librabbitmq:Transport', + 'memory': 'kombu.transport.memory:Transport', + 'redis': 'kombu.transport.redis:Transport', + 'SQS': 'kombu.transport.SQS:Transport', + 'sqs': 'kombu.transport.SQS:Transport', + 'beanstalk': 'kombu.transport.beanstalk:Transport', + 'mongodb': 'kombu.transport.mongodb:Transport', + 'couchdb': 'kombu.transport.couchdb:Transport', + 'zookeeper': 'kombu.transport.zookeeper:Transport', + 'django': 'kombu.transport.django:Transport', + 'sqlalchemy': 'kombu.transport.sqlalchemy:Transport', + 'sqla': 'kombu.transport.sqlalchemy:Transport', + 'SLMQ': 'kombu.transport.SLMQ.Transport', + 'slmq': 'kombu.transport.SLMQ.Transport', + 'ghettoq.taproot.Redis': _ghettoq('Redis', 'redis', 'redis'), + 'ghettoq.taproot.Database': _ghettoq('Database', 'django', 'django'), + 'ghettoq.taproot.MongoDB': _ghettoq('MongoDB', 'mongodb'), + 'ghettoq.taproot.Beanstalk': _ghettoq('Beanstalk', 'beanstalk'), + 'ghettoq.taproot.CouchDB': _ghettoq('CouchDB', 'couchdb'), + 'filesystem': 'kombu.transport.filesystem:Transport', + 'zeromq': 'kombu.transport.zmq:Transport', + 'zmq': 'kombu.transport.zmq:Transport', + 'amqplib': 'kombu.transport.amqplib:Transport', +} + +_transport_cache = {} + + +def resolve_transport(transport=None): + if isinstance(transport, string_t): + try: + transport = TRANSPORT_ALIASES[transport] + except KeyError: + if '.' not in transport and ':' not in transport: + from kombu.utils.text import fmatch_best + alt = fmatch_best(transport, TRANSPORT_ALIASES) + if alt: + raise KeyError( + 'No such transport: {0}. Did you mean {1}?'.format( + transport, alt)) + raise KeyError('No such transport: {0}'.format(transport)) + else: + if callable(transport): + transport = transport() + return symbol_by_name(transport) + return transport + + +def get_transport_cls(transport=None): + """Get transport class by name. 
+ + The transport string is the full path to a transport class, e.g.:: + + "kombu.transport.pyamqp:Transport" + + If the name does not include `"."` (is not fully qualified), + the alias table will be consulted. + + """ + if transport not in _transport_cache: + _transport_cache[transport] = resolve_transport(transport) + return _transport_cache[transport] diff --git a/kombu/transport/amqplib.py b/kombu/transport/amqplib.py new file mode 100644 index 0000000..5f4dbac --- /dev/null +++ b/kombu/transport/amqplib.py @@ -0,0 +1,402 @@ +""" +kombu.transport.amqplib +======================= + +amqplib transport. + +""" +from __future__ import absolute_import + +import errno +import socket + +try: + from ssl import SSLError +except ImportError: + class SSLError(Exception): # noqa + pass +from struct import unpack + + +class NA(object): + pass + +try: + from amqplib import client_0_8 as amqp + from amqplib.client_0_8 import transport + from amqplib.client_0_8.channel import Channel as _Channel + from amqplib.client_0_8.exceptions import AMQPConnectionException + from amqplib.client_0_8.exceptions import AMQPChannelException +except ImportError: # pragma: no cover + + class NAx(object): + pass + amqp = NA + amqp.Connection = NA + transport = _Channel = NA # noqa + # Sphinx crashes if this is NA, must be different class + transport.TCPTransport = transport.SSLTransport = NAx + AMQPConnectionException = AMQPChannelException = NA # noqa + + +from kombu.five import items +from kombu.utils.encoding import str_to_bytes +from kombu.utils.amq_manager import get_manager + +from . import base + +DEFAULT_PORT = 5672 +HAS_MSG_PEEK = hasattr(socket, 'MSG_PEEK') + +# amqplib's handshake mistakenly identifies as protocol version 1191, +# this breaks in RabbitMQ tip, which no longer falls back to +# 0-8 for unknown ids. +transport.AMQP_PROTOCOL_HEADER = str_to_bytes('AMQP\x01\x01\x08\x00') + + +# - fixes warnings when socket is not connected. 
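+# The subclasses below patch amqplib's transports so that _read() retries
+# on EAGAIN/EINTR once a frame read has started (instead of failing
+# mid-frame) and so that __del__() drops the socket reference after
+# closing; read_frame() also verifies the trailing 0xce frame-end octet
+# required by the AMQP framing layer.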
+class TCPTransport(transport.TCPTransport): + + def read_frame(self): + frame_type, channel, size = unpack('>BHI', self._read(7, True)) + payload = self._read(size) + ch = ord(self._read(1)) + if ch == 206: # '\xce' + return frame_type, channel, payload + else: + raise Exception( + 'Framing Error, received 0x%02x while expecting 0xce' % ch) + + def _read(self, n, initial=False): + read_buffer = self._read_buffer + while len(read_buffer) < n: + try: + s = self.sock.recv(n - len(read_buffer)) + except socket.error as exc: + if not initial and exc.errno in (errno.EAGAIN, errno.EINTR): + continue + raise + if not s: + raise IOError('Socket closed') + read_buffer += s + + result = read_buffer[:n] + self._read_buffer = read_buffer[n:] + + return result + + def __del__(self): + try: + self.close() + except Exception: + pass + finally: + self.sock = None + +transport.TCPTransport = TCPTransport + + +class SSLTransport(transport.SSLTransport): + + def __init__(self, host, connect_timeout, ssl): + if isinstance(ssl, dict): + self.sslopts = ssl + self.sslobj = None + + transport._AbstractTransport.__init__(self, host, connect_timeout) + + def read_frame(self): + frame_type, channel, size = unpack('>BHI', self._read(7, True)) + payload = self._read(size) + ch = ord(self._read(1)) + if ch == 206: # '\xce' + return frame_type, channel, payload + else: + raise Exception( + 'Framing Error, received 0x%02x while expecting 0xce' % ch) + + def _read(self, n, initial=False): + result = '' + + while len(result) < n: + try: + s = self.sslobj.read(n - len(result)) + except socket.error as exc: + if not initial and exc.errno in (errno.EAGAIN, errno.EINTR): + continue + raise + if not s: + raise IOError('Socket closed') + result += s + + return result + + def __del__(self): + try: + self.close() + except Exception: + pass + finally: + self.sock = None +transport.SSLTransport = SSLTransport + + +class Connection(amqp.Connection): # pragma: no cover + connected = True + + def _do_close(self, *args, **kwargs): + # amqplib does not ignore socket errors when connection + # is closed on the remote end. 
+ try: + super(Connection, self)._do_close(*args, **kwargs) + except socket.error: + pass + + def _dispatch_basic_return(self, channel, args, msg): + reply_code = args.read_short() + reply_text = args.read_shortstr() + exchange = args.read_shortstr() + routing_key = args.read_shortstr() + + exc = AMQPChannelException(reply_code, reply_text, (50, 60)) + if channel.events['basic_return']: + for callback in channel.events['basic_return']: + callback(exc, exchange, routing_key, msg) + else: + raise exc + + def __init__(self, *args, **kwargs): + super(Connection, self).__init__(*args, **kwargs) + self._method_override = {(60, 50): self._dispatch_basic_return} + + def drain_events(self, timeout=None): + """Wait for an event on a channel.""" + chanmap = self.channels + chanid, method_sig, args, content = self._wait_multiple( + chanmap, None, timeout=timeout) + + channel = chanmap[chanid] + + if (content + and channel.auto_decode + and hasattr(content, 'content_encoding')): + try: + content.body = content.body.decode(content.content_encoding) + except Exception: + pass + + amqp_method = self._method_override.get(method_sig) or \ + channel._METHOD_MAP.get(method_sig, None) + + if amqp_method is None: + raise Exception('Unknown AMQP method (%d, %d)' % method_sig) + + if content is None: + return amqp_method(channel, args) + else: + return amqp_method(channel, args, content) + + def read_timeout(self, timeout=None): + if timeout is None: + return self.method_reader.read_method() + sock = self.transport.sock + prev = sock.gettimeout() + if prev != timeout: + sock.settimeout(timeout) + try: + try: + return self.method_reader.read_method() + except SSLError as exc: + # http://bugs.python.org/issue10272 + if 'timed out' in str(exc): + raise socket.timeout() + # Non-blocking SSL sockets can throw SSLError + if 'The operation did not complete' in str(exc): + raise socket.timeout() + raise + finally: + if prev != timeout: + sock.settimeout(prev) + + def _wait_multiple(self, channels, allowed_methods, timeout=None): + for channel_id, channel in items(channels): + method_queue = channel.method_queue + for queued_method in method_queue: + method_sig = queued_method[0] + if (allowed_methods is None + or method_sig in allowed_methods + or method_sig == (20, 40)): + method_queue.remove(queued_method) + method_sig, args, content = queued_method + return channel_id, method_sig, args, content + + # Nothing queued, need to wait for a method from the peer + read_timeout = self.read_timeout + wait = self.wait + while 1: + channel, method_sig, args, content = read_timeout(timeout) + + if (channel in channels + and allowed_methods is None + or method_sig in allowed_methods + or method_sig == (20, 40)): + return channel, method_sig, args, content + + # Not the channel and/or method we were looking for. Queue + # this method for later + channels[channel].method_queue.append((method_sig, args, content)) + + # + # If we just queued up a method for channel 0 (the Connection + # itself) it's probably a close method in reaction to some + # error, so deal with it right away. 
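+            # A close on channel 0 tears down the whole connection, so
+            # postponing it would leave every other channel waiting on
+            # a dead socket.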
+            #
+            if channel == 0:
+                wait()
+
+    def channel(self, channel_id=None):
+        try:
+            return self.channels[channel_id]
+        except KeyError:
+            return Channel(self, channel_id)
+
+
+class Message(base.Message):
+
+    def __init__(self, channel, msg, **kwargs):
+        props = msg.properties
+        super(Message, self).__init__(
+            channel,
+            body=msg.body,
+            delivery_tag=msg.delivery_tag,
+            content_type=props.get('content_type'),
+            content_encoding=props.get('content_encoding'),
+            delivery_info=msg.delivery_info,
+            properties=msg.properties,
+            headers=props.get('application_headers') or {},
+            **kwargs)
+
+
+class Channel(_Channel, base.StdChannel):
+    Message = Message
+    events = {'basic_return': set()}
+
+    def __init__(self, *args, **kwargs):
+        self.no_ack_consumers = set()
+        super(Channel, self).__init__(*args, **kwargs)
+
+    def prepare_message(self, body, priority=None, content_type=None,
+                        content_encoding=None, headers=None, properties=None):
+        """Encapsulate data into an AMQP message."""
+        return amqp.Message(body, priority=priority,
+                            content_type=content_type,
+                            content_encoding=content_encoding,
+                            application_headers=headers,
+                            **properties)
+
+    def message_to_python(self, raw_message):
+        """Convert encoded message body back to a Python value."""
+        return self.Message(self, raw_message)
+
+    def close(self):
+        try:
+            super(Channel, self).close()
+        finally:
+            self.connection = None
+
+    def basic_consume(self, *args, **kwargs):
+        consumer_tag = super(Channel, self).basic_consume(*args, **kwargs)
+        if kwargs['no_ack']:
+            self.no_ack_consumers.add(consumer_tag)
+        return consumer_tag
+
+    def basic_cancel(self, consumer_tag, **kwargs):
+        self.no_ack_consumers.discard(consumer_tag)
+        return super(Channel, self).basic_cancel(consumer_tag, **kwargs)
+
+
+class Transport(base.Transport):
+    Connection = Connection
+
+    default_port = DEFAULT_PORT
+
+    # it's very annoying that amqplib sometimes raises AttributeError
+    # if the connection is lost, but nothing we can do about that here.
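+    #
+    # A hedged usage sketch (broker URL assumed; ``Connection`` is
+    # kombu's connection class):
+    #
+    #     from kombu import Connection
+    #     conn = Connection('amqplib://guest:guest@localhost:5672//')
+    #     try:
+    #         conn.connect()
+    #     except conn.connection_errors:
+    #         pass  # covers the tuple defined below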
+ connection_errors = ( + base.Transport.connection_errors + ( + AMQPConnectionException, + socket.error, IOError, OSError, AttributeError) + ) + channel_errors = base.Transport.channel_errors + (AMQPChannelException, ) + + driver_name = 'amqplib' + driver_type = 'amqp' + supports_ev = True + + def __init__(self, client, **kwargs): + self.client = client + self.default_port = kwargs.get('default_port') or self.default_port + + if amqp is NA: + raise ImportError('Missing amqplib library (pip install amqplib)') + + def create_channel(self, connection): + return connection.channel() + + def drain_events(self, connection, **kwargs): + return connection.drain_events(**kwargs) + + def establish_connection(self): + """Establish connection to the AMQP broker.""" + conninfo = self.client + for name, default_value in items(self.default_connection_params): + if not getattr(conninfo, name, None): + setattr(conninfo, name, default_value) + if conninfo.hostname == 'localhost': + conninfo.hostname = '127.0.0.1' + conn = self.Connection(host=conninfo.host, + userid=conninfo.userid, + password=conninfo.password, + login_method=conninfo.login_method, + virtual_host=conninfo.virtual_host, + insist=conninfo.insist, + ssl=conninfo.ssl, + connect_timeout=conninfo.connect_timeout) + conn.client = self.client + return conn + + def close_connection(self, connection): + """Close the AMQP broker connection.""" + connection.client = None + connection.close() + + def is_alive(self, connection): + if HAS_MSG_PEEK: + sock = connection.transport.sock + prev = sock.gettimeout() + sock.settimeout(0.0001) + try: + sock.recv(1, socket.MSG_PEEK) + except socket.timeout: + pass + except socket.error: + return False + finally: + sock.settimeout(prev) + return True + + def verify_connection(self, connection): + return connection.channels is not None and self.is_alive(connection) + + def register_with_event_loop(self, connection, loop): + loop.add_reader(connection.method_reader.source.sock, + self.on_readable, connection, loop) + + @property + def default_connection_params(self): + return {'userid': 'guest', 'password': 'guest', + 'port': self.default_port, + 'hostname': 'localhost', 'login_method': 'AMQPLAIN'} + + def get_manager(self, *args, **kwargs): + return get_manager(self.client, *args, **kwargs) diff --git a/kombu/transport/base.py b/kombu/transport/base.py new file mode 100644 index 0000000..c226307 --- /dev/null +++ b/kombu/transport/base.py @@ -0,0 +1,173 @@ +""" +kombu.transport.base +==================== + +Base transport interface. 
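+
+A new transport subclasses :class:`Transport` and fills in the methods
+that raise ``_LeftBlank`` below.  A hedged skeleton (names invented)::
+
+    from kombu.transport import base
+
+    class MyTransport(base.Transport):
+        driver_type = driver_name = 'my'
+
+        def establish_connection(self):
+            return self   # real transports return a client handle
+
+        def close_connection(self, connection):
+            pass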
+ +""" +from __future__ import absolute_import + +import errno +import socket + +from kombu.exceptions import ChannelError, ConnectionError +from kombu.message import Message +from kombu.utils import cached_property +from kombu.utils.compat import get_errno + +__all__ = ['Message', 'StdChannel', 'Management', 'Transport'] + + +def _LeftBlank(obj, method): + return NotImplementedError( + 'Transport {0.__module__}.{0.__name__} does not implement {1}'.format( + obj.__class__, method)) + + +class StdChannel(object): + no_ack_consumers = None + + def Consumer(self, *args, **kwargs): + from kombu.messaging import Consumer + return Consumer(self, *args, **kwargs) + + def Producer(self, *args, **kwargs): + from kombu.messaging import Producer + return Producer(self, *args, **kwargs) + + def get_bindings(self): + raise _LeftBlank(self, 'get_bindings') + + def after_reply_message_received(self, queue): + """reply queue semantics: can be used to delete the queue + after transient reply message received.""" + pass + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + + +class Management(object): + + def __init__(self, transport): + self.transport = transport + + def get_bindings(self): + raise _LeftBlank(self, 'get_bindings') + + +class Transport(object): + """Base class for transports.""" + Management = Management + + #: The :class:`~kombu.Connection` owning this instance. + client = None + + #: Set to True if :class:`~kombu.Connection` should pass the URL + #: unmodified. + can_parse_url = False + + #: Default port used when no port has been specified. + default_port = None + + #: Tuple of errors that can happen due to connection failure. + connection_errors = (ConnectionError, ) + + #: Tuple of errors that can happen due to channel/method failure. + channel_errors = (ChannelError, ) + + #: Type of driver, can be used to separate transports + #: using the AMQP protocol (driver_type: 'amqp'), + #: Redis (driver_type: 'redis'), etc... + driver_type = 'N/A' + + #: Name of driver library (e.g. 'py-amqp', 'redis', 'beanstalkc'). + driver_name = 'N/A' + + #: Whether this transports support heartbeats, + #: and that the :meth:`heartbeat_check` method has any effect. + supports_heartbeats = False + + #: Set to true if the transport supports the AIO interface. 
+ supports_ev = False + + __reader = None + + def __init__(self, client, **kwargs): + self.client = client + + def establish_connection(self): + raise _LeftBlank(self, 'establish_connection') + + def close_connection(self, connection): + raise _LeftBlank(self, 'close_connection') + + def create_channel(self, connection): + raise _LeftBlank(self, 'create_channel') + + def close_channel(self, connection): + raise _LeftBlank(self, 'close_channel') + + def drain_events(self, connection, **kwargs): + raise _LeftBlank(self, 'drain_events') + + def heartbeat_check(self, connection, rate=2): + pass + + def driver_version(self): + return 'N/A' + + def get_heartbeat_interval(self, connection): + return 0 + + def register_with_event_loop(self, loop): + pass + + def unregister_from_event_loop(self, loop): + pass + + def verify_connection(self, connection): + return True + + def _make_reader(self, connection, timeout=socket.timeout, + error=socket.error, get_errno=get_errno, + _unavail=(errno.EAGAIN, errno.EINTR)): + drain_events = connection.drain_events + + def _read(loop): + if not connection.connected: + raise ConnectionError('Socket was disconnected') + try: + drain_events(timeout=0) + except timeout: + return + except error as exc: + if get_errno(exc) in _unavail: + return + raise + loop.call_soon(_read, loop) + + return _read + + def qos_semantics_matches_spec(self, connection): + return True + + def on_readable(self, connection, loop): + reader = self.__reader + if reader is None: + reader = self.__reader = self._make_reader(connection) + reader(loop) + + @property + def default_connection_params(self): + return {} + + def get_manager(self, *args, **kwargs): + return self.Management(self) + + @cached_property + def manager(self): + return self.get_manager() diff --git a/kombu/transport/beanstalk.py b/kombu/transport/beanstalk.py new file mode 100644 index 0000000..4e73bbc --- /dev/null +++ b/kombu/transport/beanstalk.py @@ -0,0 +1,155 @@ +""" +kombu.transport.beanstalk +========================= + +Beanstalk transport. + +:copyright: (c) 2010 - 2013 by David Ziegler. +:license: BSD, see LICENSE for more details. + +""" +from __future__ import absolute_import + +import socket + +from anyjson import loads, dumps + +from kombu.five import Empty +from kombu.utils.encoding import bytes_to_str + +from . 
import virtual + +try: + import beanstalkc +except ImportError: # pragma: no cover + beanstalkc = None # noqa + +DEFAULT_PORT = 11300 + +__author__ = 'David Ziegler ' + + +class Channel(virtual.Channel): + _client = None + + def _parse_job(self, job): + item, dest = None, None + if job: + try: + item = loads(bytes_to_str(job.body)) + dest = job.stats()['tube'] + except Exception: + job.bury() + else: + job.delete() + else: + raise Empty() + return item, dest + + def _put(self, queue, message, **kwargs): + extra = {} + priority = message['properties']['delivery_info']['priority'] + ttr = message['properties'].get('ttr') + if ttr is not None: + extra['ttr'] = ttr + + self.client.use(queue) + self.client.put(dumps(message), priority=priority, **extra) + + def _get(self, queue): + if queue not in self.client.watching(): + self.client.watch(queue) + + [self.client.ignore(active) for active in self.client.watching() + if active != queue] + + job = self.client.reserve(timeout=1) + item, dest = self._parse_job(job) + return item + + def _get_many(self, queues, timeout=1): + # timeout of None will cause beanstalk to timeout waiting + # for a new request + if timeout is None: + timeout = 1 + + watching = self.client.watching() + + [self.client.watch(active) for active in queues + if active not in watching] + + [self.client.ignore(active) for active in watching + if active not in queues] + + job = self.client.reserve(timeout=timeout) + return self._parse_job(job) + + def _purge(self, queue): + if queue not in self.client.watching(): + self.client.watch(queue) + + [self.client.ignore(active) + for active in self.client.watching() + if active != queue] + count = 0 + while 1: + job = self.client.reserve(timeout=1) + if job: + job.delete() + count += 1 + else: + break + return count + + def _size(self, queue): + return 0 + + def _open(self): + conninfo = self.connection.client + host = conninfo.hostname or 'localhost' + port = conninfo.port or DEFAULT_PORT + conn = beanstalkc.Connection(host=host, port=port) + conn.connect() + return conn + + def close(self): + if self._client is not None: + return self._client.close() + super(Channel, self).close() + + @property + def client(self): + if self._client is None: + self._client = self._open() + return self._client + + +class Transport(virtual.Transport): + Channel = Channel + + polling_interval = 1 + default_port = DEFAULT_PORT + connection_errors = ( + virtual.Transport.connection_errors + ( + socket.error, IOError, + getattr(beanstalkc, 'SocketError', None), + ) + ) + channel_errors = ( + virtual.Transport.channel_errors + ( + socket.error, IOError, + getattr(beanstalkc, 'SocketError', None), + getattr(beanstalkc, 'BeanstalkcException', None), + ) + ) + driver_type = 'beanstalk' + driver_name = 'beanstalkc' + + def __init__(self, *args, **kwargs): + if beanstalkc is None: + raise ImportError( + 'Missing beanstalkc library (pip install beanstalkc)') + super(Transport, self).__init__(*args, **kwargs) + + def driver_version(self): + return beanstalkc.__version__ diff --git a/kombu/transport/couchdb.py b/kombu/transport/couchdb.py new file mode 100644 index 0000000..99d1362 --- /dev/null +++ b/kombu/transport/couchdb.py @@ -0,0 +1,142 @@ +""" +kombu.transport.couchdb +======================= + +CouchDB transport. + +:copyright: (c) 2010 - 2013 by David Clymer. +:license: BSD, see LICENSE for more details. 
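+
+A hedged connection sketch (server location and queue name assumed)::
+
+    from kombu import Connection
+
+    with Connection('couchdb://localhost:5984/kombu') as conn:
+        queue = conn.SimpleQueue('simple')
+        queue.put({'hello': 'world'})
+        queue.close()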
+ +""" +from __future__ import absolute_import + +import socket + +from anyjson import loads, dumps + +from kombu.five import Empty +from kombu.utils import uuid4 +from kombu.utils.encoding import bytes_to_str + +from . import virtual + +try: + import couchdb +except ImportError: # pragma: no cover + couchdb = None # noqa + +DEFAULT_PORT = 5984 +DEFAULT_DATABASE = 'kombu_default' + +__author__ = 'David Clymer ' + + +def create_message_view(db): + from couchdb import design + + view = design.ViewDefinition('kombu', 'messages', """ + function (doc) { + if (doc.queue && doc.payload) + emit(doc.queue, doc); + } + """) + if not view.get_doc(db): + view.sync(db) + + +class Channel(virtual.Channel): + _client = None + + view_created = False + + def _put(self, queue, message, **kwargs): + self.client.save({'_id': uuid4().hex, + 'queue': queue, + 'payload': dumps(message)}) + + def _get(self, queue): + result = self._query(queue, limit=1) + if not result: + raise Empty() + + item = result.rows[0].value + self.client.delete(item) + return loads(bytes_to_str(item['payload'])) + + def _purge(self, queue): + result = self._query(queue) + for item in result: + self.client.delete(item.value) + return len(result) + + def _size(self, queue): + return len(self._query(queue)) + + def _open(self): + conninfo = self.connection.client + dbname = conninfo.virtual_host + proto = conninfo.ssl and 'https' or 'http' + if not dbname or dbname == '/': + dbname = DEFAULT_DATABASE + port = conninfo.port or DEFAULT_PORT + server = couchdb.Server('%s://%s:%s/' % (proto, + conninfo.hostname, + port)) + # Use username and password if avaliable + try: + if conninfo.userid: + server.resource.credentials = (conninfo.userid, + conninfo.password) + except AttributeError: + pass + try: + return server[dbname] + except couchdb.http.ResourceNotFound: + return server.create(dbname) + + def _query(self, queue, **kwargs): + if not self.view_created: + # if the message view is not yet set up, we'll need it now. 
+ create_message_view(self.client) + self.view_created = True + return self.client.view('kombu/messages', key=queue, **kwargs) + + @property + def client(self): + if self._client is None: + self._client = self._open() + return self._client + + +class Transport(virtual.Transport): + Channel = Channel + + polling_interval = 1 + default_port = DEFAULT_PORT + connection_errors = ( + virtual.Transport.connection_errors + ( + socket.error, + getattr(couchdb, 'HTTPError', None), + getattr(couchdb, 'ServerError', None), + getattr(couchdb, 'Unauthorized', None), + ) + ) + channel_errors = ( + virtual.Transport.channel_errors + ( + getattr(couchdb, 'HTTPError', None), + getattr(couchdb, 'ServerError', None), + getattr(couchdb, 'PreconditionFailed', None), + getattr(couchdb, 'ResourceConflict', None), + getattr(couchdb, 'ResourceNotFound', None), + ) + ) + driver_type = 'couchdb' + driver_name = 'couchdb' + + def __init__(self, *args, **kwargs): + if couchdb is None: + raise ImportError('Missing couchdb library (pip install couchdb)') + super(Transport, self).__init__(*args, **kwargs) + + def driver_version(self): + return couchdb.__version__ diff --git a/kombu/transport/django/__init__.py b/kombu/transport/django/__init__.py new file mode 100644 index 0000000..e859f3f --- /dev/null +++ b/kombu/transport/django/__init__.py @@ -0,0 +1,68 @@ +"""Kombu transport using the Django database as a message store.""" +from __future__ import absolute_import + +from anyjson import loads, dumps + +from django.conf import settings +from django.core import exceptions as errors + +from kombu.five import Empty +from kombu.transport import virtual +from kombu.utils.encoding import bytes_to_str + +from .models import Queue + +VERSION = (1, 0, 0) +__version__ = '.'.join(map(str, VERSION)) + +POLLING_INTERVAL = getattr(settings, 'KOMBU_POLLING_INTERVAL', + getattr(settings, 'DJKOMBU_POLLING_INTERVAL', 5.0)) + + +class Channel(virtual.Channel): + + def _new_queue(self, queue, **kwargs): + Queue.objects.get_or_create(name=queue) + + def _put(self, queue, message, **kwargs): + Queue.objects.publish(queue, dumps(message)) + + def basic_consume(self, queue, *args, **kwargs): + qinfo = self.state.bindings[queue] + exchange = qinfo[0] + if self.typeof(exchange).type == 'fanout': + return + super(Channel, self).basic_consume(queue, *args, **kwargs) + + def _get(self, queue): + m = Queue.objects.fetch(queue) + if m: + return loads(bytes_to_str(m)) + raise Empty() + + def _size(self, queue): + return Queue.objects.size(queue) + + def _purge(self, queue): + return Queue.objects.purge(queue) + + def refresh_connection(self): + from django import db + db.close_connection() + + +class Transport(virtual.Transport): + Channel = Channel + + default_port = 0 + polling_interval = POLLING_INTERVAL + channel_errors = ( + virtual.Transport.channel_errors + ( + errors.ObjectDoesNotExist, errors.MultipleObjectsReturned) + ) + driver_type = 'sql' + driver_name = 'django' + + def driver_version(self): + import django + return '.'.join(map(str, django.VERSION)) diff --git a/kombu/transport/django/management/__init__.py b/kombu/transport/django/management/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kombu/transport/django/management/commands/__init__.py b/kombu/transport/django/management/commands/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kombu/transport/django/management/commands/clean_kombu_messages.py b/kombu/transport/django/management/commands/clean_kombu_messages.py new file mode 100644 index 
0000000..c82ba9f --- /dev/null +++ b/kombu/transport/django/management/commands/clean_kombu_messages.py @@ -0,0 +1,22 @@ +from __future__ import absolute_import + +from django.core.management.base import BaseCommand + + +def pluralize(desc, value): + if value > 1: + return desc + 's' + return desc + + +class Command(BaseCommand): + requires_model_validation = True + + def handle(self, *args, **options): + from kombu.transport.django.models import Message + + count = Message.objects.filter(visible=False).count() + + print('Removing {0} invisible {1} from database... '.format( + count, pluralize('message', count))) + Message.objects.cleanup() diff --git a/kombu/transport/django/managers.py b/kombu/transport/django/managers.py new file mode 100644 index 0000000..6b3f378 --- /dev/null +++ b/kombu/transport/django/managers.py @@ -0,0 +1,86 @@ +from __future__ import absolute_import + +from django.db import transaction, connection, models +try: + from django.db import connections, router +except ImportError: # pre-Django 1.2 + connections = router = None # noqa + + +class QueueManager(models.Manager): + + def publish(self, queue_name, payload): + queue, created = self.get_or_create(name=queue_name) + queue.messages.create(payload=payload) + + def fetch(self, queue_name): + try: + queue = self.get(name=queue_name) + except self.model.DoesNotExist: + return + + return queue.messages.pop() + + def size(self, queue_name): + return self.get(name=queue_name).messages.count() + + def purge(self, queue_name): + try: + queue = self.get(name=queue_name) + except self.model.DoesNotExist: + return + + messages = queue.messages.all() + count = messages.count() + messages.delete() + return count + + +def select_for_update(qs): + try: + return qs.select_for_update() + except AttributeError: + return qs + + +class MessageManager(models.Manager): + _messages_received = [0] + cleanup_every = 10 + + @transaction.commit_manually + def pop(self): + try: + resultset = select_for_update( + self.filter(visible=True).order_by('sent_at', 'id') + ) + result = resultset[0:1].get() + result.visible = False + result.save() + recv = self.__class__._messages_received + recv[0] += 1 + if not recv[0] % self.cleanup_every: + self.cleanup() + transaction.commit() + return result.payload + except self.model.DoesNotExist: + transaction.commit() + except: + transaction.rollback() + + def cleanup(self): + cursor = self.connection_for_write().cursor() + try: + cursor.execute( + 'DELETE FROM %s WHERE visible=%%s' % ( + self.model._meta.db_table, ), + (False, ) + ) + except: + transaction.rollback_unless_managed() + else: + transaction.commit_unless_managed() + + def connection_for_write(self): + if connections: + return connections[router.db_for_write(self.model)] + return connection diff --git a/kombu/transport/django/migrations/0001_initial.py b/kombu/transport/django/migrations/0001_initial.py new file mode 100644 index 0000000..ea1edb0 --- /dev/null +++ b/kombu/transport/django/migrations/0001_initial.py @@ -0,0 +1,57 @@ +# encoding: utf-8 +from __future__ import absolute_import + +# flake8: noqa +import datetime +from south.db import db +from south.v2 import SchemaMigration +from django.db import models + +class Migration(SchemaMigration): + + def forwards(self, orm): + + # Adding model 'Queue' + db.create_table('djkombu_queue', ( + ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), + ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=200)), + )) + db.send_create_signal('django', 
['Queue']) + + # Adding model 'Message' + db.create_table('djkombu_message', ( + ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), + ('visible', self.gf('django.db.models.fields.BooleanField')(default=True, db_index=True)), + ('sent_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, db_index=True, blank=True)), + ('payload', self.gf('django.db.models.fields.TextField')()), + ('queue', self.gf('django.db.models.fields.related.ForeignKey')(related_name='messages', to=orm['django.Queue'])), + )) + db.send_create_signal('django', ['Message']) + + + def backwards(self, orm): + + # Deleting model 'Queue' + db.delete_table('djkombu_queue') + + # Deleting model 'Message' + db.delete_table('djkombu_message') + + + models = { + 'django.message': { + 'Meta': {'object_name': 'Message', 'db_table': "'djkombu_message'"}, + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'payload': ('django.db.models.fields.TextField', [], {}), + 'queue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'messages'", 'to': "orm['django.Queue']"}), + 'sent_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}), + 'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}) + }, + 'django.queue': { + 'Meta': {'object_name': 'Queue', 'db_table': "'djkombu_queue'"}, + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}) + } + } + + complete_apps = ['django'] diff --git a/kombu/transport/django/migrations/__init__.py b/kombu/transport/django/migrations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/kombu/transport/django/models.py b/kombu/transport/django/models.py new file mode 100644 index 0000000..df6a462 --- /dev/null +++ b/kombu/transport/django/models.py @@ -0,0 +1,32 @@ +from __future__ import absolute_import + +from django.db import models +from django.utils.translation import ugettext_lazy as _ + +from .managers import QueueManager, MessageManager + + +class Queue(models.Model): + name = models.CharField(_('name'), max_length=200, unique=True) + + objects = QueueManager() + + class Meta: + db_table = 'djkombu_queue' + verbose_name = _('queue') + verbose_name_plural = _('queues') + + +class Message(models.Model): + visible = models.BooleanField(default=True, db_index=True) + sent_at = models.DateTimeField(null=True, blank=True, db_index=True, + auto_now_add=True) + payload = models.TextField(_('payload'), null=False) + queue = models.ForeignKey(Queue, related_name='messages') + + objects = MessageManager() + + class Meta: + db_table = 'djkombu_message' + verbose_name = _('message') + verbose_name_plural = _('messages') diff --git a/kombu/transport/filesystem.py b/kombu/transport/filesystem.py new file mode 100644 index 0000000..c83dcdc --- /dev/null +++ b/kombu/transport/filesystem.py @@ -0,0 +1,193 @@ +""" +kombu.transport.filesystem +========================== + +Transport using the file system as the message store. + +""" +from __future__ import absolute_import + +from anyjson import loads, dumps + +import os +import shutil +import uuid +import tempfile + +from . 
import virtual
+from kombu.exceptions import ChannelError
+from kombu.five import Empty, monotonic
+from kombu.utils import cached_property
+from kombu.utils.encoding import bytes_to_str, str_to_bytes
+
+VERSION = (1, 0, 0)
+__version__ = '.'.join(map(str, VERSION))
+
+# needs win32all to work on Windows
+if os.name == 'nt':
+
+    import win32con
+    import win32file
+    import pywintypes
+
+    LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
+    # 0 is the default
+    LOCK_SH = 0  # noqa
+    LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY  # noqa
+    __overlapped = pywintypes.OVERLAPPED()
+
+    def lock(file, flags):
+        hfile = win32file._get_osfhandle(file.fileno())
+        win32file.LockFileEx(hfile, flags, 0, 0xffff0000, __overlapped)
+
+    def unlock(file):
+        hfile = win32file._get_osfhandle(file.fileno())
+        win32file.UnlockFileEx(hfile, 0, 0xffff0000, __overlapped)
+
+elif os.name == 'posix':
+
+    import fcntl
+    from fcntl import LOCK_EX, LOCK_SH, LOCK_NB  # noqa
+
+    def lock(file, flags):  # noqa
+        fcntl.flock(file.fileno(), flags)
+
+    def unlock(file):  # noqa
+        fcntl.flock(file.fileno(), fcntl.LOCK_UN)
+else:
+    raise RuntimeError(
+        'Filesystem plugin only defined for NT and POSIX platforms')
+
+
+class Channel(virtual.Channel):
+
+    def _put(self, queue, payload, **kwargs):
+        """Put `message` onto `queue`."""
+
+        filename = '%s_%s.%s.msg' % (int(round(monotonic() * 1000)),
+                                     uuid.uuid4(), queue)
+        filename = os.path.join(self.data_folder_out, filename)
+
+        try:
+            # open first, so a failed open cannot leave `f` unbound
+            # when the locking/cleanup block below runs.
+            f = open(filename, 'wb')
+            try:
+                lock(f, LOCK_EX)
+                f.write(str_to_bytes(dumps(payload)))
+            finally:
+                unlock(f)
+                f.close()
+        except (IOError, OSError):
+            raise ChannelError(
+                'Cannot add file {0!r} to directory'.format(filename))
+
+    def _get(self, queue):
+        """Get next message from `queue`."""
+
+        queue_find = '.' + queue + '.msg'
+        folder = os.listdir(self.data_folder_in)
+        folder = sorted(folder)
+        while len(folder) > 0:
+            filename = folder.pop(0)
+
+            # only handle message for the requested queue
+            if filename.find(queue_find) < 0:
+                continue
+
+            if self.store_processed:
+                processed_folder = self.processed_folder
+            else:
+                processed_folder = tempfile.gettempdir()
+
+            try:
+                # move the file to the tmp/processed folder
+                shutil.move(os.path.join(self.data_folder_in, filename),
+                            processed_folder)
+            except IOError:
+                pass  # file could be locked, or removed in meantime so ignore
+
+            filename = os.path.join(processed_folder, filename)
+            try:
+                f = open(filename, 'rb')
+                payload = f.read()
+                f.close()
+                if not self.store_processed:
+                    os.remove(filename)
+            except (IOError, OSError):
+                raise ChannelError(
+                    'Cannot read file {0!r} from queue.'.format(filename))
+
+            return loads(bytes_to_str(payload))
+
+        raise Empty()
+
+    def _purge(self, queue):
+        """Remove all messages from `queue`."""
+        count = 0
+        queue_find = '.'
+ queue + '.msg' + + folder = os.listdir(self.data_folder_in) + while len(folder) > 0: + filename = folder.pop() + try: + # only purge messages for the requested queue + if filename.find(queue_find) < 0: + continue + + filename = os.path.join(self.data_folder_in, filename) + os.remove(filename) + + count += 1 + + except OSError: + # we simply ignore its existence, as it was probably + # processed by another worker + pass + + return count + + def _size(self, queue): + """Return the number of messages in `queue` as an :class:`int`.""" + count = 0 + + queue_find = '.{0}.msg'.format(queue) + folder = os.listdir(self.data_folder_in) + while len(folder) > 0: + filename = folder.pop() + + # only handle message for the requested queue + if filename.find(queue_find) < 0: + continue + + count += 1 + + return count + + @property + def transport_options(self): + return self.connection.client.transport_options + + @cached_property + def data_folder_in(self): + return self.transport_options.get('data_folder_in', 'data_in') + + @cached_property + def data_folder_out(self): + return self.transport_options.get('data_folder_out', 'data_out') + + @cached_property + def store_processed(self): + return self.transport_options.get('store_processed', False) + + @cached_property + def processed_folder(self): + return self.transport_options.get('processed_folder', 'processed') + + +class Transport(virtual.Transport): + Channel = Channel + + default_port = 0 + driver_type = 'filesystem' + driver_name = 'filesystem' + + def driver_version(self): + return 'N/A' diff --git a/kombu/transport/librabbitmq.py b/kombu/transport/librabbitmq.py new file mode 100644 index 0000000..286bd78 --- /dev/null +++ b/kombu/transport/librabbitmq.py @@ -0,0 +1,173 @@ +""" +kombu.transport.librabbitmq +=========================== + +`librabbitmq`_ transport. + +.. _`librabbitmq`: http://pypi.python.org/librabbitmq/ + +""" +from __future__ import absolute_import + +import os +import socket +import warnings + +try: + import librabbitmq as amqp + from librabbitmq import ChannelError, ConnectionError +except ImportError: # pragma: no cover + try: + import pylibrabbitmq as amqp # noqa + from pylibrabbitmq import ChannelError, ConnectionError # noqa + except ImportError: + raise ImportError('No module named librabbitmq') + +from kombu.five import items, values +from kombu.utils.amq_manager import get_manager +from kombu.utils.text import version_string_as_tuple + +from . 
import base
+
+W_VERSION = """
+    librabbitmq version too old to detect RabbitMQ version information
+    so make sure you are using librabbitmq 1.5 when using rabbitmq > 3.3
+"""
+DEFAULT_PORT = 5672
+
+NO_SSL_ERROR = """\
+ssl not supported by librabbitmq, please use pyamqp:// or stunnel\
+"""
+
+
+class Message(base.Message):
+
+    def __init__(self, channel, props, info, body):
+        super(Message, self).__init__(
+            channel,
+            body=body,
+            delivery_info=info,
+            properties=props,
+            delivery_tag=info.get('delivery_tag'),
+            content_type=props.get('content_type'),
+            content_encoding=props.get('content_encoding'),
+            headers=props.get('headers'))
+
+
+class Channel(amqp.Channel, base.StdChannel):
+    Message = Message
+
+    def prepare_message(self, body, priority=None,
+                        content_type=None, content_encoding=None,
+                        headers=None, properties=None):
+        """Encapsulate data into an AMQP message."""
+        properties = properties if properties is not None else {}
+        properties.update({'content_type': content_type,
+                           'content_encoding': content_encoding,
+                           'headers': headers,
+                           'priority': priority})
+        return body, properties
+
+
+class Connection(amqp.Connection):
+    Channel = Channel
+    Message = Message
+
+
+class Transport(base.Transport):
+    Connection = Connection
+
+    default_port = DEFAULT_PORT
+    connection_errors = (
+        base.Transport.connection_errors + (
+            ConnectionError, socket.error, IOError, OSError)
+    )
+    channel_errors = (
+        base.Transport.channel_errors + (ChannelError, )
+    )
+    driver_type = 'amqp'
+    driver_name = 'librabbitmq'
+
+    supports_ev = True
+
+    def __init__(self, client, **kwargs):
+        self.client = client
+        self.default_port = kwargs.get('default_port') or self.default_port
+        self.__reader = None
+
+    def driver_version(self):
+        return amqp.__version__
+
+    def create_channel(self, connection):
+        return connection.channel()
+
+    def drain_events(self, connection, **kwargs):
+        return connection.drain_events(**kwargs)
+
+    def establish_connection(self):
+        """Establish connection to the AMQP broker."""
+        conninfo = self.client
+        for name, default_value in items(self.default_connection_params):
+            if not getattr(conninfo, name, None):
+                setattr(conninfo, name, default_value)
+        if conninfo.ssl:
+            raise NotImplementedError(NO_SSL_ERROR)
+        opts = dict({
+            'host': conninfo.host,
+            'userid': conninfo.userid,
+            'password': conninfo.password,
+            'virtual_host': conninfo.virtual_host,
+            'login_method': conninfo.login_method,
+            'insist': conninfo.insist,
+            'ssl': conninfo.ssl,
+            'connect_timeout': conninfo.connect_timeout,
+        }, **conninfo.transport_options or {})
+        conn = self.Connection(**opts)
+        conn.client = self.client
+        self.client.drain_events = conn.drain_events
+        return conn
+
+    def close_connection(self, connection):
+        """Close the AMQP broker connection."""
+        self.client.drain_events = None
+        connection.close()
+
+    def _collect(self, connection):
+        if connection is not None:
+            for channel in values(connection.channels):
+                channel.connection = None
+            try:
+                os.close(connection.fileno())
+            except OSError:
+                pass
+            connection.channels.clear()
+            connection.callbacks.clear()
+        self.client.drain_events = None
+        self.client = None
+
+    def verify_connection(self, connection):
+        return connection.connected
+
+    def register_with_event_loop(self, connection, loop):
+        loop.add_reader(
+            connection.fileno(), self.on_readable, connection, loop,
+        )
+
+    def get_manager(self, *args, **kwargs):
+        return get_manager(self.client, *args, **kwargs)
+
+    def qos_semantics_matches_spec(self, connection):
+        try:
+            props =
connection.server_properties + except AttributeError: + warnings.warn(UserWarning(W_VERSION)) + else: + if props.get('product') == 'RabbitMQ': + return version_string_as_tuple(props['version']) < (3, 3) + return True + + @property + def default_connection_params(self): + return {'userid': 'guest', 'password': 'guest', + 'port': self.default_port, + 'hostname': 'localhost', 'login_method': 'AMQPLAIN'} diff --git a/kombu/transport/memory.py b/kombu/transport/memory.py new file mode 100644 index 0000000..b1ba70f --- /dev/null +++ b/kombu/transport/memory.py @@ -0,0 +1,77 @@ +""" +kombu.transport.memory +====================== + +In-memory transport. + +""" +from __future__ import absolute_import + +from kombu.five import Queue, values + +from . import virtual + + +class Channel(virtual.Channel): + queues = {} + do_restore = False + supports_fanout = True + + def _has_queue(self, queue, **kwargs): + return queue in self.queues + + def _new_queue(self, queue, **kwargs): + if queue not in self.queues: + self.queues[queue] = Queue() + + def _get(self, queue, timeout=None): + return self._queue_for(queue).get(block=False) + + def _queue_for(self, queue): + if queue not in self.queues: + self.queues[queue] = Queue() + return self.queues[queue] + + def _queue_bind(self, *args): + pass + + def _put_fanout(self, exchange, message, routing_key=None, **kwargs): + for queue in self._lookup(exchange, routing_key): + self._queue_for(queue).put(message) + + def _put(self, queue, message, **kwargs): + self._queue_for(queue).put(message) + + def _size(self, queue): + return self._queue_for(queue).qsize() + + def _delete(self, queue, *args): + self.queues.pop(queue, None) + + def _purge(self, queue): + q = self._queue_for(queue) + size = q.qsize() + q.queue.clear() + return size + + def close(self): + super(Channel, self).close() + for queue in values(self.queues): + queue.empty() + self.queues = {} + + def after_reply_message_received(self, queue): + pass + + +class Transport(virtual.Transport): + Channel = Channel + + #: memory backend state is global. + state = virtual.BrokerState() + + driver_type = 'memory' + driver_name = 'memory' + + def driver_version(self): + return 'N/A' diff --git a/kombu/transport/mongodb.py b/kombu/transport/mongodb.py new file mode 100644 index 0000000..2e1f9de --- /dev/null +++ b/kombu/transport/mongodb.py @@ -0,0 +1,314 @@ +""" +kombu.transport.mongodb +======================= + +MongoDB transport. + +:copyright: (c) 2010 - 2013 by Flavio Percoco Premoli. +:license: BSD, see LICENSE for more details. + +""" +from __future__ import absolute_import + +import pymongo + +from pymongo import errors +from anyjson import loads, dumps +from pymongo import MongoClient, uri_parser + +from kombu.five import Empty +from kombu.syn import _detect_environment +from kombu.utils.encoding import bytes_to_str + +from . 
import virtual
+
+DEFAULT_HOST = '127.0.0.1'
+DEFAULT_PORT = 27017
+
+DEFAULT_MESSAGES_COLLECTION = 'messages'
+DEFAULT_ROUTING_COLLECTION = 'messages.routing'
+DEFAULT_BROADCAST_COLLECTION = 'messages.broadcast'
+
+
+class BroadcastCursor(object):
+    """Cursor for broadcast queues."""
+
+    def __init__(self, cursor):
+        self._cursor = cursor
+
+        self.purge(rewind=False)
+
+    def get_size(self):
+        return self._cursor.count() - self._offset
+
+    def close(self):
+        self._cursor.close()
+
+    def purge(self, rewind=True):
+        if rewind:
+            self._cursor.rewind()
+
+        # Fast forward the cursor past old events
+        self._offset = self._cursor.count()
+        self._cursor = self._cursor.skip(self._offset)
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        while True:
+            try:
+                msg = next(self._cursor)
+            except pymongo.errors.OperationFailure as exc:
+                # In some cases tailed cursor can become invalid
+                # and have to be reinitialized
+                if 'not valid at server' in exc.message:
+                    self.purge()
+
+                    continue
+
+                raise
+            else:
+                break
+
+        self._offset += 1
+
+        return msg
+    next = __next__
+
+
+class Channel(virtual.Channel):
+    _client = None
+    supports_fanout = True
+    _fanout_queues = {}
+
+    def __init__(self, *vargs, **kwargs):
+        super(Channel, self).__init__(*vargs, **kwargs)
+
+        self._broadcast_cursors = {}
+
+        # Evaluate connection
+        self._create_client()
+
+    def _new_queue(self, queue, **kwargs):
+        pass
+
+    def _get(self, queue):
+        if queue in self._fanout_queues:
+            try:
+                msg = next(self.get_broadcast_cursor(queue))
+            except StopIteration:
+                msg = None
+        else:
+            msg = self.get_messages().find_and_modify(
+                query={'queue': queue},
+                sort={'_id': pymongo.ASCENDING},
+                remove=True,
+            )
+
+        if msg is None:
+            raise Empty()
+
+        return loads(bytes_to_str(msg['payload']))
+
+    def _size(self, queue):
+        if queue in self._fanout_queues:
+            return self.get_broadcast_cursor(queue).get_size()
+
+        return self.get_messages().find({'queue': queue}).count()
+
+    def _put(self, queue, message, **kwargs):
+        self.get_messages().insert({'payload': dumps(message),
+                                    'queue': queue})
+
+    def _purge(self, queue):
+        size = self._size(queue)
+
+        if queue in self._fanout_queues:
+            self.get_broadcast_cursor(queue).purge()
+        else:
+            self.get_messages().remove({'queue': queue})
+
+        return size
+
+    def _parse_uri(self, scheme='mongodb://'):
+        # See mongodb uri documentation:
+        # http://docs.mongodb.org/manual/reference/connection-string/
+        client = self.connection.client
+        hostname = client.hostname
+
+        if not hostname.startswith(scheme):
+            hostname = scheme + hostname
+
+        if not hostname[len(scheme):]:
+            hostname += DEFAULT_HOST
+
+        if client.userid and '@' not in hostname:
+            head, tail = hostname.split('://')
+
+            credentials = client.userid
+            if client.password:
+                credentials += ':' + client.password
+
+            hostname = head + '://' + credentials + '@' + tail
+
+        port = client.port if client.port is not None else DEFAULT_PORT
+
+        parsed = uri_parser.parse_uri(hostname, port)
+
+        dbname = parsed['database'] or client.virtual_host
+
+        if dbname in ('/', None):
+            dbname = 'kombu_default'
+
+        options = {
+            'auto_start_request': True,
+            'ssl': client.ssl,
+            'connectTimeoutMS': (int(client.connect_timeout * 1000)
+                                 if client.connect_timeout else None),
+        }
+        options.update(client.transport_options)
+        options.update(parsed['options'])
+
+        return hostname, dbname, options
+
+    def _open(self, scheme='mongodb://'):
+        hostname, dbname, options = self._parse_uri(scheme=scheme)
+
+        mongoconn = MongoClient(
+            host=hostname, ssl=options['ssl'],
auto_start_request=options['auto_start_request'], + connectTimeoutMS=options['connectTimeoutMS'], + use_greenlets=_detect_environment() != 'default', + ) + database = mongoconn[dbname] + + version = mongoconn.server_info()['version'] + if tuple(map(int, version.split('.')[:2])) < (1, 3): + raise NotImplementedError( + 'Kombu requires MongoDB version 1.3+ (server is {0})'.format( + version)) + + self._create_broadcast(database, options) + + self._client = database + + def _create_broadcast(self, database, options): + '''Create capped collection for broadcast messages.''' + if DEFAULT_BROADCAST_COLLECTION in database.collection_names(): + return + + capsize = options.get('capped_queue_size') or 100000 + database.create_collection(DEFAULT_BROADCAST_COLLECTION, + size=capsize, capped=True) + + def _ensure_indexes(self): + '''Ensure indexes on collections.''' + self.get_messages().ensure_index( + [('queue', 1), ('_id', 1)], background=True, + ) + self.get_broadcast().ensure_index([('queue', 1)]) + self.get_routing().ensure_index([('queue', 1), ('exchange', 1)]) + + # TODO Store a more complete exchange metatable in the routing collection + def get_table(self, exchange): + """Get table of bindings for ``exchange``.""" + localRoutes = frozenset(self.state.exchanges[exchange]['table']) + brokerRoutes = self.get_messages().routing.find( + {'exchange': exchange} + ) + + return localRoutes | frozenset((r['routing_key'], + r['pattern'], + r['queue']) for r in brokerRoutes) + + def _put_fanout(self, exchange, message, routing_key, **kwargs): + """Deliver fanout message.""" + self.get_broadcast().insert({'payload': dumps(message), + 'queue': exchange}) + + def _queue_bind(self, exchange, routing_key, pattern, queue): + if self.typeof(exchange).type == 'fanout': + self.create_broadcast_cursor(exchange, routing_key, pattern, queue) + self._fanout_queues[queue] = exchange + + meta = {'exchange': exchange, + 'queue': queue, + 'routing_key': routing_key, + 'pattern': pattern} + self.get_routing().update(meta, meta, upsert=True) + + def queue_delete(self, queue, **kwargs): + self.get_routing().remove({'queue': queue}) + + super(Channel, self).queue_delete(queue, **kwargs) + + if queue in self._fanout_queues: + try: + cursor = self._broadcast_cursors.pop(queue) + except KeyError: + pass + else: + cursor.close() + + self._fanout_queues.pop(queue) + + def _create_client(self): + self._open() + self._ensure_indexes() + + @property + def client(self): + if self._client is None: + self._create_client() + return self._client + + def get_messages(self): + return self.client[DEFAULT_MESSAGES_COLLECTION] + + def get_routing(self): + return self.client[DEFAULT_ROUTING_COLLECTION] + + def get_broadcast(self): + return self.client[DEFAULT_BROADCAST_COLLECTION] + + def get_broadcast_cursor(self, queue): + try: + return self._broadcast_cursors[queue] + except KeyError: + # Cursor may be absent when Channel created more than once. + # _fanout_queues is a class-level mutable attribute so it's + # shared over all Channel instances. 
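+            # Passing None for routing_key/pattern is safe here:
+            # create_broadcast_cursor() uses only the exchange name in
+            # its query and the queue name as the cache key.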
+ return self.create_broadcast_cursor( + self._fanout_queues[queue], None, None, queue, + ) + + def create_broadcast_cursor(self, exchange, routing_key, pattern, queue): + cursor = self.get_broadcast().find( + query={'queue': exchange}, + sort=[('$natural', 1)], + tailable=True, + ) + ret = self._broadcast_cursors[queue] = BroadcastCursor(cursor) + return ret + + +class Transport(virtual.Transport): + Channel = Channel + + can_parse_url = True + polling_interval = 1 + default_port = DEFAULT_PORT + connection_errors = ( + virtual.Transport.connection_errors + (errors.ConnectionFailure, ) + ) + channel_errors = ( + virtual.Transport.channel_errors + ( + errors.ConnectionFailure, + errors.OperationFailure) + ) + driver_type = 'mongodb' + driver_name = 'pymongo' + + def driver_version(self): + return pymongo.version diff --git a/kombu/transport/pyamqp.py b/kombu/transport/pyamqp.py new file mode 100644 index 0000000..0184430 --- /dev/null +++ b/kombu/transport/pyamqp.py @@ -0,0 +1,146 @@ +""" +kombu.transport.pyamqp +====================== + +pure python amqp transport. + +""" +from __future__ import absolute_import + +import amqp + +from kombu.five import items +from kombu.utils.amq_manager import get_manager +from kombu.utils.text import version_string_as_tuple + +from . import base + +DEFAULT_PORT = 5672 + + +class Message(base.Message): + + def __init__(self, channel, msg, **kwargs): + props = msg.properties + super(Message, self).__init__( + channel, + body=msg.body, + delivery_tag=msg.delivery_tag, + content_type=props.get('content_type'), + content_encoding=props.get('content_encoding'), + delivery_info=msg.delivery_info, + properties=msg.properties, + headers=props.get('application_headers') or {}, + **kwargs) + + +class Channel(amqp.Channel, base.StdChannel): + Message = Message + + def prepare_message(self, body, priority=None, + content_type=None, content_encoding=None, + headers=None, properties=None, _Message=amqp.Message): + """Prepares message so that it can be sent using this transport.""" + return _Message( + body, + priority=priority, + content_type=content_type, + content_encoding=content_encoding, + application_headers=headers, + **properties or {} + ) + + def message_to_python(self, raw_message): + """Convert encoded message body back to a Python value.""" + return self.Message(self, raw_message) + + +class Connection(amqp.Connection): + Channel = Channel + + +class Transport(base.Transport): + Connection = Connection + + default_port = DEFAULT_PORT + + # it's very annoying that pyamqp sometimes raises AttributeError + # if the connection is lost, but nothing we can do about that here. 
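+    #
+    # Hedged sketch of using this transport with heartbeats enabled
+    # (URL and interval are illustrative):
+    #
+    #     from kombu import Connection
+    #     conn = Connection('pyamqp://guest:guest@localhost//',
+    #                       heartbeat=10)
+    #     conn.connect()
+    #     conn.heartbeat_check()  # proxies to heartbeat_tick(rate=2)
+    #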
+ connection_errors = amqp.Connection.connection_errors + channel_errors = amqp.Connection.channel_errors + recoverable_connection_errors = \ + amqp.Connection.recoverable_connection_errors + recoverable_channel_errors = amqp.Connection.recoverable_channel_errors + + driver_name = 'py-amqp' + driver_type = 'amqp' + supports_heartbeats = True + supports_ev = True + + def __init__(self, client, default_port=None, **kwargs): + self.client = client + self.default_port = default_port or self.default_port + + def driver_version(self): + return amqp.__version__ + + def create_channel(self, connection): + return connection.channel() + + def drain_events(self, connection, **kwargs): + return connection.drain_events(**kwargs) + + def establish_connection(self): + """Establish connection to the AMQP broker.""" + conninfo = self.client + for name, default_value in items(self.default_connection_params): + if not getattr(conninfo, name, None): + setattr(conninfo, name, default_value) + if conninfo.hostname == 'localhost': + conninfo.hostname = '127.0.0.1' + opts = dict({ + 'host': conninfo.host, + 'userid': conninfo.userid, + 'password': conninfo.password, + 'login_method': conninfo.login_method, + 'virtual_host': conninfo.virtual_host, + 'insist': conninfo.insist, + 'ssl': conninfo.ssl, + 'connect_timeout': conninfo.connect_timeout, + 'heartbeat': conninfo.heartbeat, + }, **conninfo.transport_options or {}) + conn = self.Connection(**opts) + conn.client = self.client + return conn + + def verify_connection(self, connection): + return connection.connected + + def close_connection(self, connection): + """Close the AMQP broker connection.""" + connection.client = None + connection.close() + + def get_heartbeat_interval(self, connection): + return connection.heartbeat + + def register_with_event_loop(self, connection, loop): + loop.add_reader(connection.sock, self.on_readable, connection, loop) + + def heartbeat_check(self, connection, rate=2): + return connection.heartbeat_tick(rate=rate) + + def qos_semantics_matches_spec(self, connection): + props = connection.server_properties + if props.get('product') == 'RabbitMQ': + return version_string_as_tuple(props['version']) < (3, 3) + return True + + @property + def default_connection_params(self): + return {'userid': 'guest', 'password': 'guest', + 'port': self.default_port, + 'hostname': 'localhost', 'login_method': 'AMQPLAIN'} + + def get_manager(self, *args, **kwargs): + return get_manager(self.client, *args, **kwargs) diff --git a/kombu/transport/pyro.py b/kombu/transport/pyro.py new file mode 100644 index 0000000..b87a5fb --- /dev/null +++ b/kombu/transport/pyro.py @@ -0,0 +1,99 @@ +""" +kombu.transport.pyro +====================== + +Pyro transport. + +Requires the :mod:`Pyro4` library to be installed. + +""" +from __future__ import absolute_import + +import sys + +from kombu.five import reraise +from kombu.utils import cached_property + +from . 
import virtual + +try: + import Pyro4 as pyro + from Pyro4.errors import NamingError +except ImportError: # pragma: no cover + pyro = NamingError = None # noqa + +DEFAULT_PORT = 9090 +E_LOOKUP = """\ +Unable to locate pyro nameserver {0.virtual_host} on host {0.hostname}\ +""" + + +class Channel(virtual.Channel): + + def queues(self): + return self.shared_queues.get_queue_names() + + def _new_queue(self, queue, **kwargs): + if queue not in self.queues(): + self.shared_queues.new_queue(queue) + + def _get(self, queue, timeout=None): + queue = self._queue_for(queue) + msg = self.shared_queues._get(queue) + return msg + + def _queue_for(self, queue): + if queue not in self.queues(): + self.shared_queues.new_queue(queue) + return queue + + def _put(self, queue, message, **kwargs): + queue = self._queue_for(queue) + self.shared_queues._put(queue, message) + + def _size(self, queue): + return self.shared_queues._size(queue) + + def _delete(self, queue, *args): + self.shared_queues._delete(queue) + + def _purge(self, queue): + return self.shared_queues._purge(queue) + + def after_reply_message_received(self, queue): + pass + + @cached_property + def shared_queues(self): + return self.connection.shared_queues + + +class Transport(virtual.Transport): + Channel = Channel + + #: memory backend state is global. + state = virtual.BrokerState() + + default_port = DEFAULT_PORT + + driver_type = driver_name = 'pyro' + + def _open(self): + conninfo = self.client + pyro.config.HMAC_KEY = conninfo.virtual_host + try: + nameserver = pyro.locateNS(host=conninfo.hostname, + port=self.default_port) + # name of registered pyro object + uri = nameserver.lookup(conninfo.virtual_host) + return pyro.Proxy(uri) + except NamingError: + reraise(NamingError, NamingError(E_LOOKUP.format(conninfo)), + sys.exc_info()[2]) + + def driver_version(self): + return pyro.__version__ + + @cached_property + def shared_queues(self): + return self._open() diff --git a/kombu/transport/redis.py b/kombu/transport/redis.py new file mode 100644 index 0000000..683f396 --- /dev/null +++ b/kombu/transport/redis.py @@ -0,0 +1,957 @@ +""" +kombu.transport.redis +===================== + +Redis transport. + +""" +from __future__ import absolute_import + +import numbers +import socket + +from bisect import bisect +from collections import namedtuple +from contextlib import contextmanager +from time import time + +from amqp import promise +from anyjson import loads, dumps + +from kombu.exceptions import InconsistencyError, VersionMismatch +from kombu.five import Empty, values, string_t +from kombu.log import get_logger +from kombu.utils import cached_property, uuid +from kombu.utils.eventio import poll, READ, ERR +from kombu.utils.encoding import bytes_to_str +from kombu.utils.url import _parse_url + +NO_ROUTE_ERROR = """ +Cannot route message for exchange {0!r}: Table empty or key no longer exists. +Probably the key ({1!r}) has been removed from the Redis database. +""" + +try: + from billiard.util import register_after_fork +except ImportError: # pragma: no cover + try: + from multiprocessing.util import register_after_fork # noqa + except ImportError: + def register_after_fork(*args, **kwargs): # noqa + pass + +try: + import redis +except ImportError: # pragma: no cover + redis = None # noqa + +from . 
import virtual + +logger = get_logger('kombu.transport.redis') +crit, warn = logger.critical, logger.warn + +DEFAULT_PORT = 6379 +DEFAULT_DB = 0 + +PRIORITY_STEPS = [0, 3, 6, 9] + +error_classes_t = namedtuple('error_classes_t', ( + 'connection_errors', 'channel_errors', +)) + +# This implementation may seem overly complex, but I assure you there is +# a good reason for doing it this way. +# +# Consuming from several connections enables us to emulate channels, +# which means we can have different service guarantees for individual +# channels. +# +# So we need to consume messages from multiple connections simultaneously, +# and using epoll means we don't have to do so using multiple threads. +# +# Also it means we can easily use PUBLISH/SUBSCRIBE to do fanout +# exchanges (broadcast), as an alternative to pushing messages to fanout-bound +# queues manually. + + +def get_redis_error_classes(): + from redis import exceptions + # This exception suddenly changed name between redis-py versions + if hasattr(exceptions, 'InvalidData'): + DataError = exceptions.InvalidData + else: + DataError = exceptions.DataError + return error_classes_t( + (virtual.Transport.connection_errors + ( + InconsistencyError, + socket.error, + IOError, + OSError, + exceptions.ConnectionError, + exceptions.AuthenticationError)), + (virtual.Transport.channel_errors + ( + DataError, + exceptions.InvalidResponse, + exceptions.ResponseError)), + ) + + +class MutexHeld(Exception): + pass + + +@contextmanager +def Mutex(client, name, expire): + lock_id = uuid() + i_won = client.setnx(name, lock_id) + try: + if i_won: + client.expire(name, expire) + yield + else: + if not client.ttl(name): + client.expire(name, expire) + raise MutexHeld() + finally: + if i_won: + pipe = client.pipeline(True) + try: + pipe.watch(name) + if pipe.get(name) == lock_id: + pipe.multi() + pipe.delete(name) + pipe.execute() + pipe.unwatch() + except redis.WatchError: + pass + + +class QoS(virtual.QoS): + restore_at_shutdown = True + + def __init__(self, *args, **kwargs): + super(QoS, self).__init__(*args, **kwargs) + self._vrestore_count = 0 + + def append(self, message, delivery_tag): + delivery = message.delivery_info + EX, RK = delivery['exchange'], delivery['routing_key'] + with self.pipe_or_acquire() as pipe: + pipe.zadd(self.unacked_index_key, delivery_tag, time()) \ + .hset(self.unacked_key, delivery_tag, + dumps([message._raw, EX, RK])) \ + .execute() + super(QoS, self).append(message, delivery_tag) + + def restore_unacked(self): + for tag in self._delivered: + self.restore_by_tag(tag) + self._delivered.clear() + + def ack(self, delivery_tag): + self._remove_from_indices(delivery_tag).execute() + super(QoS, self).ack(delivery_tag) + + def reject(self, delivery_tag, requeue=False): + if requeue: + self.restore_by_tag(delivery_tag, leftmost=True) + self.ack(delivery_tag) + + @contextmanager + def pipe_or_acquire(self, pipe=None): + if pipe: + yield pipe + else: + with self.channel.conn_or_acquire() as client: + yield client.pipeline() + + def _remove_from_indices(self, delivery_tag, pipe=None): + with self.pipe_or_acquire(pipe) as pipe: + return pipe.zrem(self.unacked_index_key, delivery_tag) \ + .hdel(self.unacked_key, delivery_tag) + + def restore_visible(self, start=0, num=10, interval=10): + self._vrestore_count += 1 + if (self._vrestore_count - 1) % interval: + return + with self.channel.conn_or_acquire() as client: + ceil = time() - self.visibility_timeout + try: + with Mutex(client, self.unacked_mutex_key, + self.unacked_mutex_expire): + 
visible = client.zrevrangebyscore( + self.unacked_index_key, ceil, 0, + start=num and start, num=num, withscores=True) + for tag, score in visible or []: + self.restore_by_tag(tag, client) + except MutexHeld: + pass + + def restore_by_tag(self, tag, client=None, leftmost=False): + with self.channel.conn_or_acquire(client) as client: + p, _, _ = self._remove_from_indices( + tag, client.pipeline().hget(self.unacked_key, tag)).execute() + if p: + M, EX, RK = loads(bytes_to_str(p)) # json is unicode + self.channel._do_restore_message(M, EX, RK, client, leftmost) + + @cached_property + def unacked_key(self): + return self.channel.unacked_key + + @cached_property + def unacked_index_key(self): + return self.channel.unacked_index_key + + @cached_property + def unacked_mutex_key(self): + return self.channel.unacked_mutex_key + + @cached_property + def unacked_mutex_expire(self): + return self.channel.unacked_mutex_expire + + @cached_property + def visibility_timeout(self): + return self.channel.visibility_timeout + + +class MultiChannelPoller(object): + eventflags = READ | ERR + + #: Set by :meth:`get` while reading from the socket. + _in_protected_read = False + + #: Set of one-shot callbacks to call after reading from socket. + after_read = None + + def __init__(self): + # active channels + self._channels = set() + # file descriptor -> channel map. + self._fd_to_chan = {} + # channel -> socket map + self._chan_to_sock = {} + # poll implementation (epoll/kqueue/select) + self.poller = poll() + # one-shot callbacks called after reading from socket. + self.after_read = set() + + def close(self): + for fd in values(self._chan_to_sock): + try: + self.poller.unregister(fd) + except (KeyError, ValueError): + pass + self._channels.clear() + self._fd_to_chan.clear() + self._chan_to_sock.clear() + + def add(self, channel): + self._channels.add(channel) + + def discard(self, channel): + self._channels.discard(channel) + + def _on_connection_disconnect(self, connection): + sock = getattr(connection, '_sock', None) + if sock is not None: + self.poller.unregister(sock) + + def _register(self, channel, client, type): + if (channel, client, type) in self._chan_to_sock: + self._unregister(channel, client, type) + if client.connection._sock is None: # not connected yet. + client.connection.connect() + sock = client.connection._sock + self._fd_to_chan[sock.fileno()] = (channel, type) + self._chan_to_sock[(channel, client, type)] = sock + self.poller.register(sock, self.eventflags) + + def _unregister(self, channel, client, type): + self.poller.unregister(self._chan_to_sock[(channel, client, type)]) + + def _register_BRPOP(self, channel): + """enable BRPOP mode for channel.""" + ident = channel, channel.client, 'BRPOP' + if channel.client.connection._sock is None or \ + ident not in self._chan_to_sock: + channel._in_poll = False + self._register(*ident) + + if not channel._in_poll: # send BRPOP + channel._brpop_start() + + def _register_LISTEN(self, channel): + """enable LISTEN mode for channel.""" + if channel.subclient.connection._sock is None: + channel._in_listen = False + self._register(channel, channel.subclient, 'LISTEN') + if not channel._in_listen: + channel._subscribe() # send SUBSCRIBE + + def on_poll_start(self): + for channel in self._channels: + if channel.active_queues: # BRPOP mode? + if channel.qos.can_consume(): + self._register_BRPOP(channel) + if channel.active_fanout_queues: # LISTEN mode? 
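+                # fanout queues are read over a dedicated PUB/SUB
+                # connection (``subclient``) instead of BRPOP.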
+                self._register_LISTEN(channel)
+
+    def on_poll_init(self, poller):
+        self.poller = poller
+        for channel in self._channels:
+            return channel.qos.restore_visible(
+                num=channel.unacked_restore_limit,
+            )
+
+    def maybe_restore_messages(self):
+        for channel in self._channels:
+            if channel.active_queues:
+                # only need to do this once, as they are not local to channel.
+                return channel.qos.restore_visible(
+                    num=channel.unacked_restore_limit,
+                )
+
+    def on_readable(self, fileno):
+        chan, type = self._fd_to_chan[fileno]
+        if chan.qos.can_consume():
+            return chan.handlers[type]()
+
+    def handle_event(self, fileno, event):
+        if event & READ:
+            return self.on_readable(fileno), self
+        elif event & ERR:
+            chan, type = self._fd_to_chan[fileno]
+            chan._poll_error(type)
+
+    def get(self, timeout=None):
+        self._in_protected_read = True
+        try:
+            for channel in self._channels:
+                if channel.active_queues:           # BRPOP mode?
+                    if channel.qos.can_consume():
+                        self._register_BRPOP(channel)
+                if channel.active_fanout_queues:    # LISTEN mode?
+                    self._register_LISTEN(channel)
+
+            events = self.poller.poll(timeout)
+            for fileno, event in events or []:
+                ret = self.handle_event(fileno, event)
+                if ret:
+                    return ret
+
+            # - no new data, so try to restore messages.
+            # - reset active redis commands.
+            self.maybe_restore_messages()
+
+            raise Empty()
+        finally:
+            self._in_protected_read = False
+            while self.after_read:
+                try:
+                    fun = self.after_read.pop()
+                except KeyError:
+                    break
+                else:
+                    fun()
+
+    @property
+    def fds(self):
+        return self._fd_to_chan
+
+
+class Channel(virtual.Channel):
+    QoS = QoS
+
+    _client = None
+    _subclient = None
+    supports_fanout = True
+    keyprefix_queue = '_kombu.binding.%s'
+    keyprefix_fanout = '/{db}.'
+    sep = '\x06\x16'
+    _in_poll = False
+    _in_listen = False
+    _fanout_queues = {}
+    ack_emulation = True
+    unacked_key = 'unacked'
+    unacked_index_key = 'unacked_index'
+    unacked_mutex_key = 'unacked_mutex'
+    unacked_mutex_expire = 300  # 5 minutes
+    unacked_restore_limit = None
+    visibility_timeout = 3600   # 1 hour
+    priority_steps = PRIORITY_STEPS
+    socket_timeout = None
+    max_connections = 10
+    #: Transport option to enable/disable the fanout keyprefix.
+    #: Should be enabled by default, but that is not
+    #: backwards compatible.  Can also be a string, in which
+    #: case it changes the default prefix ('/{db}.') into something
+    #: else.  The prefix must include a leading slash and a trailing dot.
+    fanout_prefix = False
+
+    #: If enabled the fanout exchange will support patterns in routing
+    #: and binding keys (like a topic exchange but using PUB/SUB).
+    #: This will be enabled by default in a future version.
+    fanout_patterns = False
+    _pool = None
+
+    from_transport_options = (
+        virtual.Channel.from_transport_options +
+        ('ack_emulation',
+         'unacked_key',
+         'unacked_index_key',
+         'unacked_mutex_key',
+         'unacked_mutex_expire',
+         'visibility_timeout',
+         'unacked_restore_limit',
+         'fanout_prefix',
+         'fanout_patterns',
+         'socket_timeout',
+         'max_connections',
+         'priority_steps')  # <-- do not add comma here!
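+        # every option above may be overridden per connection by
+        # passing ``transport_options`` to :class:`~kombu.Connection`.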
+    )
+
+    def __init__(self, *args, **kwargs):
+        super_ = super(Channel, self)
+        super_.__init__(*args, **kwargs)
+
+        if not self.ack_emulation:  # disable visibility timeout
+            self.QoS = virtual.QoS
+
+        self._queue_cycle = []
+        self.Client = self._get_client()
+        self.ResponseError = self._get_response_error()
+        self.active_fanout_queues = set()
+        self.auto_delete_queues = set()
+        self._fanout_to_queue = {}
+        self.handlers = {'BRPOP': self._brpop_read, 'LISTEN': self._receive}
+
+        if self.fanout_prefix:
+            if isinstance(self.fanout_prefix, string_t):
+                self.keyprefix_fanout = self.fanout_prefix
+        else:
+            # previous versions did not set a fanout prefix, so it cannot
+            # be enabled by default.
+            self.keyprefix_fanout = ''
+
+        # Evaluate connection.
+        try:
+            self.client.info()
+        except Exception:
+            if self._pool:
+                self._pool.disconnect()
+            raise
+
+        self.connection.cycle.add(self)  # add to channel poller.
+        # copy errors, in case the channel is closed but threads
+        # are still waiting for data.
+        self.connection_errors = self.connection.connection_errors
+
+        register_after_fork(self, self._after_fork)
+
+    def _after_fork(self):
+        if self._pool is not None:
+            self._pool.disconnect()
+
+    def _on_connection_disconnect(self, connection):
+        if self.connection and self.connection.cycle:
+            self.connection.cycle._on_connection_disconnect(connection)
+
+    def _do_restore_message(self, payload, exchange, routing_key,
+                            client=None, leftmost=False):
+        with self.conn_or_acquire(client) as client:
+            try:
+                try:
+                    payload['headers']['redelivered'] = True
+                except KeyError:
+                    pass
+                for queue in self._lookup(exchange, routing_key):
+                    (client.lpush if leftmost else client.rpush)(
+                        queue, dumps(payload),
+                    )
+            except Exception:
+                crit('Could not restore message: %r', payload, exc_info=True)
+
+    def _restore(self, message, leftmost=False):
+        if not self.ack_emulation:
+            return super(Channel, self)._restore(message)
+        tag = message.delivery_tag
+        with self.conn_or_acquire() as client:
+            P, _ = client.pipeline() \
+                .hget(self.unacked_key, tag) \
+                .hdel(self.unacked_key, tag) \
+                .execute()
+            if P:
+                M, EX, RK = loads(bytes_to_str(P))  # json is unicode
+                self._do_restore_message(M, EX, RK, client, leftmost)
+
+    def _restore_at_beginning(self, message):
+        return self._restore(message, leftmost=True)
+
+    def basic_consume(self, queue, *args, **kwargs):
+        if queue in self._fanout_queues:
+            exchange, _ = self._fanout_queues[queue]
+            self.active_fanout_queues.add(queue)
+            self._fanout_to_queue[exchange] = queue
+        ret = super(Channel, self).basic_consume(queue, *args, **kwargs)
+        self._update_cycle()
+        return ret
+
+    def basic_cancel(self, consumer_tag):
+        # If we are busy reading messages we may experience
+        # a race condition where a message is consumed after
+        # cancelling, so we must delay this operation until reading
+        # is complete (Issue celery/celery#1773).
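+        # (the deferred call is added to the poller's ``after_read``
+        # set, which ``MultiChannelPoller.get`` runs once the current
+        # protected read has finished.)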
+ connection = self.connection + if connection: + if connection.cycle._in_protected_read: + return connection.cycle.after_read.add( + promise(self._basic_cancel, (consumer_tag, )), + ) + return self._basic_cancel(consumer_tag) + + def _basic_cancel(self, consumer_tag): + try: + queue = self._tag_to_queue[consumer_tag] + except KeyError: + return + try: + self.active_fanout_queues.remove(queue) + except KeyError: + pass + else: + self._unsubscribe_from(queue) + try: + exchange, _ = self._fanout_queues[queue] + self._fanout_to_queue.pop(exchange) + except KeyError: + pass + ret = super(Channel, self).basic_cancel(consumer_tag) + self._update_cycle() + return ret + + def _get_publish_topic(self, exchange, routing_key): + if routing_key and self.fanout_patterns: + return ''.join([self.keyprefix_fanout, exchange, '/', routing_key]) + return ''.join([self.keyprefix_fanout, exchange]) + + def _get_subscribe_topic(self, queue): + exchange, routing_key = self._fanout_queues[queue] + return self._get_publish_topic(exchange, routing_key) + + def _subscribe(self): + keys = [self._get_subscribe_topic(queue) + for queue in self.active_fanout_queues] + if not keys: + return + c = self.subclient + if c.connection._sock is None: + c.connection.connect() + self._in_listen = True + c.psubscribe(keys) + + def _unsubscribe_from(self, queue): + topic = self._get_subscribe_topic(queue) + c = self.subclient + should_disconnect = False + if c.connection._sock is None: + c.connection.connect() + should_disconnect = True + try: + c.unsubscribe([topic]) + finally: + if should_disconnect and c.connection: + c.connection.disconnect() + + def _handle_message(self, client, r): + if bytes_to_str(r[0]) == 'unsubscribe' and r[2] == 0: + client.subscribed = False + elif bytes_to_str(r[0]) == 'pmessage': + return {'type': r[0], 'pattern': r[1], + 'channel': r[2], 'data': r[3]} + else: + return {'type': r[0], 'pattern': None, + 'channel': r[1], 'data': r[2]} + + def _receive(self): + c = self.subclient + response = None + try: + response = c.parse_response() + except self.connection_errors: + self._in_listen = False + raise Empty() + if response is not None: + payload = self._handle_message(c, response) + if bytes_to_str(payload['type']).endswith('message'): + channel = bytes_to_str(payload['channel']) + if payload['data']: + if channel[0] == '/': + _, _, channel = channel.partition('.') + try: + message = loads(bytes_to_str(payload['data'])) + except (TypeError, ValueError): + warn('Cannot process event on channel %r: %s', + channel, repr(payload)[:4096], exc_info=1) + raise Empty() + exchange = channel.split('/', 1)[0] + return message, self._fanout_to_queue[exchange] + raise Empty() + + def _brpop_start(self, timeout=1): + queues = self._consume_cycle() + if not queues: + return + keys = [self._q_for_pri(queue, pri) for pri in PRIORITY_STEPS + for queue in queues] + [timeout or 0] + self._in_poll = True + self.client.connection.send_command('BRPOP', *keys) + + def _brpop_read(self, **options): + try: + try: + dest__item = self.client.parse_response(self.client.connection, + 'BRPOP', + **options) + except self.connection_errors: + # if there's a ConnectionError, disconnect so the next + # iteration will reconnect automatically. 
+ self.client.connection.disconnect() + raise Empty() + if dest__item: + dest, item = dest__item + dest = bytes_to_str(dest).rsplit(self.sep, 1)[0] + self._rotate_cycle(dest) + return loads(bytes_to_str(item)), dest + else: + raise Empty() + finally: + self._in_poll = False + + def _poll_error(self, type, **options): + if type == 'LISTEN': + self.subclient.parse_response() + else: + self.client.parse_response(self.client.connection, type) + + def _get(self, queue): + with self.conn_or_acquire() as client: + for pri in PRIORITY_STEPS: + item = client.rpop(self._q_for_pri(queue, pri)) + if item: + return loads(bytes_to_str(item)) + raise Empty() + + def _size(self, queue): + with self.conn_or_acquire() as client: + cmds = client.pipeline() + for pri in PRIORITY_STEPS: + cmds = cmds.llen(self._q_for_pri(queue, pri)) + sizes = cmds.execute() + return sum(size for size in sizes + if isinstance(size, numbers.Integral)) + + def _q_for_pri(self, queue, pri): + pri = self.priority(pri) + return '%s%s%s' % ((queue, self.sep, pri) if pri else (queue, '', '')) + + def priority(self, n): + steps = self.priority_steps + return steps[bisect(steps, n) - 1] + + def _put(self, queue, message, **kwargs): + """Deliver message.""" + try: + pri = max(min(int( + message['properties']['delivery_info']['priority']), 9), 0) + except (TypeError, ValueError, KeyError): + pri = 0 + with self.conn_or_acquire() as client: + client.lpush(self._q_for_pri(queue, pri), dumps(message)) + + def _put_fanout(self, exchange, message, routing_key, **kwargs): + """Deliver fanout message.""" + with self.conn_or_acquire() as client: + client.publish( + self._get_publish_topic(exchange, routing_key), + dumps(message), + ) + + def _new_queue(self, queue, auto_delete=False, **kwargs): + if auto_delete: + self.auto_delete_queues.add(queue) + + def _queue_bind(self, exchange, routing_key, pattern, queue): + if self.typeof(exchange).type == 'fanout': + # Mark exchange as fanout. + self._fanout_queues[queue] = ( + exchange, routing_key.replace('#', '*'), + ) + with self.conn_or_acquire() as client: + client.sadd(self.keyprefix_queue % (exchange, ), + self.sep.join([routing_key or '', + pattern or '', + queue or ''])) + + def _delete(self, queue, exchange, routing_key, pattern, *args): + self.auto_delete_queues.discard(queue) + with self.conn_or_acquire() as client: + client.srem(self.keyprefix_queue % (exchange, ), + self.sep.join([routing_key or '', + pattern or '', + queue or ''])) + cmds = client.pipeline() + for pri in PRIORITY_STEPS: + cmds = cmds.delete(self._q_for_pri(queue, pri)) + cmds.execute() + + def _has_queue(self, queue, **kwargs): + with self.conn_or_acquire() as client: + cmds = client.pipeline() + for pri in PRIORITY_STEPS: + cmds = cmds.exists(self._q_for_pri(queue, pri)) + return any(cmds.execute()) + + def get_table(self, exchange): + key = self.keyprefix_queue % exchange + with self.conn_or_acquire() as client: + values = client.smembers(key) + if not values: + raise InconsistencyError(NO_ROUTE_ERROR.format(exchange, key)) + return [tuple(bytes_to_str(val).split(self.sep)) for val in values] + + def _purge(self, queue): + with self.conn_or_acquire() as client: + cmds = client.pipeline() + for pri in PRIORITY_STEPS: + priq = self._q_for_pri(queue, pri) + cmds = cmds.llen(priq).delete(priq) + sizes = cmds.execute() + return sum(sizes[::2]) + + def close(self): + if self._pool: + self._pool.disconnect() + if not self.closed: + # remove from channel poller. 
+            self.connection.cycle.discard(self)
+
+            # delete fanout bindings
+            for queue in self._fanout_queues:
+                if queue in self.auto_delete_queues:
+                    self.queue_delete(queue)
+
+            self._close_clients()
+
+        super(Channel, self).close()
+
+    def _close_clients(self):
+        # Close connections
+        for attr in 'client', 'subclient':
+            try:
+                self.__dict__[attr].connection.disconnect()
+            except (KeyError, AttributeError, self.ResponseError):
+                pass
+
+    def _prepare_virtual_host(self, vhost):
+        if not isinstance(vhost, numbers.Integral):
+            if not vhost or vhost == '/':
+                vhost = DEFAULT_DB
+            elif vhost.startswith('/'):
+                vhost = vhost[1:]
+            try:
+                vhost = int(vhost)
+            except ValueError:
+                raise ValueError(
+                    'Database must be an int between 0 and limit - 1, '
+                    'not {0}'.format(vhost))
+        return vhost
+
+    def _connparams(self):
+        conninfo = self.connection.client
+        connparams = {'host': conninfo.hostname or '127.0.0.1',
+                      'port': conninfo.port or DEFAULT_PORT,
+                      'virtual_host': conninfo.virtual_host,
+                      'password': conninfo.password,
+                      'max_connections': self.max_connections,
+                      'socket_timeout': self.socket_timeout}
+        host = connparams['host']
+        if '://' in host:
+            scheme, _, _, _, _, path, query = _parse_url(host)
+            if scheme == 'socket':
+                connparams.update({
+                    'connection_class': redis.UnixDomainSocketConnection,
+                    'path': '/' + path}, **query)
+                connparams.pop('host', None)
+                connparams.pop('port', None)
+        connparams['db'] = self._prepare_virtual_host(
+            connparams.pop('virtual_host', None))
+
+        channel = self
+        connection_cls = (
+            connparams.get('connection_class') or
+            redis.Connection
+        )
+
+        class Connection(connection_cls):
+            def disconnect(self):
+                channel._on_connection_disconnect(self)
+                super(Connection, self).disconnect()
+        connparams['connection_class'] = Connection
+
+        return connparams
+
+    def _create_client(self):
+        return self.Client(connection_pool=self.pool)
+
+    def _get_pool(self):
+        params = self._connparams()
+        self.keyprefix_fanout = self.keyprefix_fanout.format(db=params['db'])
+        return redis.ConnectionPool(**params)
+
+    def _get_client(self):
+        if redis.VERSION < (2, 4, 4):
+            raise VersionMismatch(
+                'Redis transport requires redis-py versions 2.4.4 or later. '
+                'You have {0.__version__}'.format(redis))
+
+        # KombuRedis maintains a connection attribute on its instance and
+        # uses that when executing commands.
+        # This was added after redis-py was changed.
+        class KombuRedis(redis.Redis):  # pragma: no cover
+
+            def __init__(self, *args, **kwargs):
+                super(KombuRedis, self).__init__(*args, **kwargs)
+                self.connection = self.connection_pool.get_connection('_')
+
+        return KombuRedis
+
+    @contextmanager
+    def conn_or_acquire(self, client=None):
+        if client:
+            yield client
+        else:
+            if self._in_poll:
+                client = self._create_client()
+                try:
+                    yield client
+                finally:
+                    self.pool.release(client.connection)
+            else:
+                yield self.client
+
+    @property
+    def pool(self):
+        if self._pool is None:
+            self._pool = self._get_pool()
+        return self._pool
+
+    @cached_property
+    def client(self):
+        """Client used to publish messages, BRPOP etc."""
+        return self._create_client()
+
+    @cached_property
+    def subclient(self):
+        """Pub/Sub connection used to consume fanout queues."""
+        client = self._create_client()
+        pubsub = client.pubsub()
+        pool = pubsub.connection_pool
+        pubsub.connection = pool.get_connection('pubsub', pubsub.shard_hint)
+        return pubsub
+
+    def _update_cycle(self):
+        """Update fair cycle between queues.
+
+        We cycle between queues fairly to make sure that
+        each queue is equally likely to be consumed from,
+        so that a very busy queue will not block others.
+
+        This works by using Redis's `BRPOP` command and
+        by rotating the most recently used queue to the
+        end of the list.  See Kombu GitHub issue #166 for
+        more discussion of this method.
+
+        """
+        self._queue_cycle = list(self.active_queues)
+
+    def _consume_cycle(self):
+        """Get a fresh list of queues from the queue cycle."""
+        active = len(self.active_queues)
+        return self._queue_cycle[0:active]
+
+    def _rotate_cycle(self, used):
+        """Move most recently used queue to end of list."""
+        cycle = self._queue_cycle
+        try:
+            cycle.append(cycle.pop(cycle.index(used)))
+        except ValueError:
+            pass
+
+    def _get_response_error(self):
+        from redis import exceptions
+        return exceptions.ResponseError
+
+    @property
+    def active_queues(self):
+        """Set of queues being consumed from (excluding fanout queues)."""
+        return set(queue for queue in self._active_queues
+                   if queue not in self.active_fanout_queues)
+
+
+class Transport(virtual.Transport):
+    Channel = Channel
+
+    polling_interval = None  # disable sleep between unsuccessful polls.
+    default_port = DEFAULT_PORT
+    supports_ev = True
+    driver_type = 'redis'
+    driver_name = 'redis'
+
+    def __init__(self, *args, **kwargs):
+        if redis is None:
+            raise ImportError('Missing redis library (pip install redis)')
+        super(Transport, self).__init__(*args, **kwargs)
+
+        # Get redis-py exceptions.
+        self.connection_errors, self.channel_errors = self._get_errors()
+        # All channels share the same poller.
+        self.cycle = MultiChannelPoller()
+
+    def driver_version(self):
+        return redis.__version__
+
+    def register_with_event_loop(self, connection, loop):
+        cycle = self.cycle
+        cycle.on_poll_init(loop.poller)
+        cycle_poll_start = cycle.on_poll_start
+        add_reader = loop.add_reader
+        on_readable = self.on_readable
+
+        def _on_disconnect(connection):
+            if connection._sock:
+                loop.remove(connection._sock)
+        cycle._on_connection_disconnect = _on_disconnect
+
+        def on_poll_start():
+            cycle_poll_start()
+            [add_reader(fd, on_readable, fd) for fd in cycle.fds]
+        loop.on_tick.add(on_poll_start)
+        loop.call_repeatedly(10, cycle.maybe_restore_messages)
+
+    def on_readable(self, fileno):
+        """Handle AIO event for one of our file descriptors."""
+        item = self.cycle.on_readable(fileno)
+        if item:
+            message, queue = item
+            if not queue or queue not in self._callbacks:
+                raise KeyError(
+                    'Message for queue {0!r} without consumers: {1}'.format(
+                        queue, message))
+            self._callbacks[queue](message)
+
+    def _get_errors(self):
+        """Utility to import redis-py's exceptions at runtime."""
+        return get_redis_error_classes()
diff --git a/kombu/transport/sqlalchemy/__init__.py b/kombu/transport/sqlalchemy/__init__.py
new file mode 100644
index 0000000..3aab155
--- /dev/null
+++ b/kombu/transport/sqlalchemy/__init__.py
@@ -0,0 +1,160 @@
+"""Kombu transport using SQLAlchemy as the message store."""
+# SQLAlchemy overrides != False to have special meaning and pep8 complains
+# flake8: noqa
+
+from __future__ import absolute_import
+
+from anyjson import loads, dumps
+from sqlalchemy import create_engine
+from sqlalchemy.exc import OperationalError
+from sqlalchemy.orm import sessionmaker
+
+from kombu.five import Empty
+from kombu.transport import virtual
+from kombu.utils import cached_property
+from kombu.utils.encoding import bytes_to_str
+
+from .models import (ModelBase, Queue as QueueBase, Message as MessageBase,
+                     class_registry,
metadata) + + +VERSION = (1, 1, 0) +__version__ = '.'.join(map(str, VERSION)) + + +class Channel(virtual.Channel): + _session = None + _engines = {} # engine cache + + def __init__(self, connection, **kwargs): + self._configure_entity_tablenames(connection.client.transport_options) + super(Channel, self).__init__(connection, **kwargs) + + def _configure_entity_tablenames(self, opts): + self.queue_tablename = opts.get('queue_tablename', 'kombu_queue') + self.message_tablename = opts.get('message_tablename', 'kombu_message') + + # + # Define the model definitions. This registers the declarative + # classes with the active SQLAlchemy metadata object. This *must* be + # done prior to the ``create_engine`` call. + # + self.queue_cls and self.message_cls + + def _engine_from_config(self): + conninfo = self.connection.client + transport_options = conninfo.transport_options.copy() + transport_options.pop('queue_tablename', None) + transport_options.pop('message_tablename', None) + return create_engine(conninfo.hostname, **transport_options) + + def _open(self): + conninfo = self.connection.client + if conninfo.hostname not in self._engines: + engine = self._engine_from_config() + Session = sessionmaker(bind=engine) + metadata.create_all(engine) + self._engines[conninfo.hostname] = engine, Session + return self._engines[conninfo.hostname] + + @property + def session(self): + if self._session is None: + _, Session = self._open() + self._session = Session() + return self._session + + def _get_or_create(self, queue): + obj = self.session.query(self.queue_cls) \ + .filter(self.queue_cls.name == queue).first() + if not obj: + obj = self.queue_cls(queue) + self.session.add(obj) + try: + self.session.commit() + except OperationalError: + self.session.rollback() + return obj + + def _new_queue(self, queue, **kwargs): + self._get_or_create(queue) + + def _put(self, queue, payload, **kwargs): + obj = self._get_or_create(queue) + message = self.message_cls(dumps(payload), obj) + self.session.add(message) + try: + self.session.commit() + except OperationalError: + self.session.rollback() + + def _get(self, queue): + obj = self._get_or_create(queue) + if self.session.bind.name == 'sqlite': + self.session.execute('BEGIN IMMEDIATE TRANSACTION') + try: + msg = self.session.query(self.message_cls) \ + .with_lockmode('update') \ + .filter(self.message_cls.queue_id == obj.id) \ + .filter(self.message_cls.visible != False) \ + .order_by(self.message_cls.sent_at) \ + .order_by(self.message_cls.id) \ + .limit(1) \ + .first() + if msg: + msg.visible = False + return loads(bytes_to_str(msg.payload)) + raise Empty() + finally: + self.session.commit() + + def _query_all(self, queue): + obj = self._get_or_create(queue) + return self.session.query(self.message_cls) \ + .filter(self.message_cls.queue_id == obj.id) + + def _purge(self, queue): + count = self._query_all(queue).delete(synchronize_session=False) + try: + self.session.commit() + except OperationalError: + self.session.rollback() + return count + + def _size(self, queue): + return self._query_all(queue).count() + + def _declarative_cls(self, name, base, ns): + if name in class_registry: + return class_registry[name] + return type(name, (base, ModelBase), ns) + + @cached_property + def queue_cls(self): + return self._declarative_cls( + 'Queue', + QueueBase, + {'__tablename__': self.queue_tablename} + ) + + @cached_property + def message_cls(self): + return self._declarative_cls( + 'Message', + MessageBase, + {'__tablename__': self.message_tablename} + ) + + +class 
Transport(virtual.Transport):
+    Channel = Channel
+
+    can_parse_url = True
+    default_port = 0
+    driver_type = 'sql'
+    driver_name = 'sqlalchemy'
+    connection_errors = (OperationalError, )
+
+    def driver_version(self):
+        import sqlalchemy
+        return sqlalchemy.__version__
diff --git a/kombu/transport/sqlalchemy/models.py b/kombu/transport/sqlalchemy/models.py
new file mode 100644
index 0000000..4fa2bfe
--- /dev/null
+++ b/kombu/transport/sqlalchemy/models.py
@@ -0,0 +1,62 @@
+from __future__ import absolute_import
+
+import datetime
+
+from sqlalchemy import (Column, Integer, String, Text, DateTime,
+                        Sequence, Boolean, ForeignKey, SmallInteger)
+from sqlalchemy.orm import relation
+from sqlalchemy.ext.declarative import declarative_base, declared_attr
+from sqlalchemy.schema import MetaData
+
+class_registry = {}
+metadata = MetaData()
+ModelBase = declarative_base(metadata=metadata, class_registry=class_registry)
+
+
+class Queue(object):
+    __table_args__ = {'sqlite_autoincrement': True, 'mysql_engine': 'InnoDB'}
+
+    id = Column(Integer, Sequence('queue_id_sequence'), primary_key=True,
+                autoincrement=True)
+    name = Column(String(200), unique=True)
+
+    def __init__(self, name):
+        self.name = name
+
+    def __str__(self):
+        return '<Queue({self.name})>'.format(self=self)
+
+    @declared_attr
+    def messages(cls):
+        return relation('Message', backref='queue', lazy='noload')
+
+
+class Message(object):
+    __table_args__ = {'sqlite_autoincrement': True, 'mysql_engine': 'InnoDB'}
+
+    id = Column(Integer, Sequence('message_id_sequence'),
+                primary_key=True, autoincrement=True)
+    visible = Column(Boolean, default=True, index=True)
+    sent_at = Column('timestamp', DateTime, nullable=True, index=True,
+                     onupdate=datetime.datetime.now)
+    payload = Column(Text, nullable=False)
+    version = Column(SmallInteger, nullable=False, default=1)
+
+    __mapper_args__ = {'version_id_col': version}
+
+    def __init__(self, payload, queue):
+        self.payload = payload
+        self.queue = queue
+
+    def __str__(self):
+        return '<Message: {0.sent_at} {0.payload} {0.queue_id}>'.format(self)
+
+    @declared_attr
+    def queue_id(self):
+        return Column(
+            Integer,
+            ForeignKey(
+                '%s.id' % class_registry['Queue'].__tablename__,
+                name='FK_kombu_message_queue'
+            )
+        )
diff --git a/kombu/transport/virtual/__init__.py b/kombu/transport/virtual/__init__.py
new file mode 100644
index 0000000..ddcca47
--- /dev/null
+++ b/kombu/transport/virtual/__init__.py
@@ -0,0 +1,854 @@
+"""
+kombu.transport.virtual
+=======================
+
+Virtual transport implementation.
+
+Emulates the AMQ API for non-AMQ transports.
+
+"""
+from __future__ import absolute_import, unicode_literals
+
+import base64
+import socket
+import sys
+import warnings
+
+from array import array
+from itertools import count
+from multiprocessing.util import Finalize
+from time import sleep
+
+from amqp.protocol import queue_declare_ok_t
+
+from kombu.exceptions import ResourceError, ChannelError
+from kombu.five import Empty, items, monotonic
+from kombu.utils import emergency_dump_state, kwdict, say, uuid
+from kombu.utils.compat import OrderedDict
+from kombu.utils.encoding import str_to_bytes, bytes_to_str
+
+from kombu.transport import base
+
+from .scheduling import FairCycle
+from .exchange import STANDARD_EXCHANGE_TYPES
+
+ARRAY_TYPE_H = 'H' if sys.version_info[0] == 3 else b'H'
+
+UNDELIVERABLE_FMT = """\
+Message could not be delivered: No queues bound to exchange {exchange!r} \
+using binding key {routing_key!r}.
+""" + +NOT_EQUIVALENT_FMT = """\ +Cannot redeclare exchange {0!r} in vhost {1!r} with \ +different type, durable, autodelete or arguments value.\ +""" + + +class Base64(object): + + def encode(self, s): + return bytes_to_str(base64.b64encode(str_to_bytes(s))) + + def decode(self, s): + return base64.b64decode(str_to_bytes(s)) + + +class NotEquivalentError(Exception): + """Entity declaration is not equivalent to the previous declaration.""" + pass + + +class UndeliverableWarning(UserWarning): + """The message could not be delivered to a queue.""" + pass + + +class BrokerState(object): + + #: exchange declarations. + exchanges = None + + #: active bindings. + bindings = None + + def __init__(self, exchanges=None, bindings=None): + self.exchanges = {} if exchanges is None else exchanges + self.bindings = {} if bindings is None else bindings + + def clear(self): + self.exchanges.clear() + self.bindings.clear() + + +class QoS(object): + """Quality of Service guarantees. + + Only supports `prefetch_count` at this point. + + :param channel: AMQ Channel. + :keyword prefetch_count: Initial prefetch count (defaults to 0). + + """ + + #: current prefetch count value + prefetch_count = 0 + + #: :class:`~collections.OrderedDict` of active messages. + #: *NOTE*: Can only be modified by the consuming thread. + _delivered = None + + #: acks can be done by other threads than the consuming thread. + #: Instead of a mutex, which doesn't perform well here, we mark + #: the delivery tags as dirty, so subsequent calls to append() can remove + #: them. + _dirty = None + + #: If disabled, unacked messages won't be restored at shutdown. + restore_at_shutdown = True + + def __init__(self, channel, prefetch_count=0): + self.channel = channel + self.prefetch_count = prefetch_count or 0 + + self._delivered = OrderedDict() + self._delivered.restored = False + self._dirty = set() + self._quick_ack = self._dirty.add + self._quick_append = self._delivered.__setitem__ + self._on_collect = Finalize( + self, self.restore_unacked_once, exitpriority=1, + ) + + def can_consume(self): + """Return true if the channel can be consumed from. + + Used to ensure the client adhers to currently active + prefetch limits. + + """ + pcount = self.prefetch_count + return not pcount or len(self._delivered) - len(self._dirty) < pcount + + def can_consume_max_estimate(self): + """Returns the maximum number of messages allowed to be returned. + + Returns an estimated number of messages that a consumer may be allowed + to consume at once from the broker. This is used for services where + bulk 'get message' calls are preferred to many individual 'get message' + calls - like SQS. 
+
+        returns:
+            A non-negative integer when a prefetch limit is active,
+            or :const:`None` when there is no limit.
+        """
+        pcount = self.prefetch_count
+        if pcount:
+            return max(pcount - (len(self._delivered) - len(self._dirty)), 0)
+
+    def append(self, message, delivery_tag):
+        """Append message to transactional state."""
+        if self._dirty:
+            self._flush()
+        self._quick_append(delivery_tag, message)
+
+    def get(self, delivery_tag):
+        return self._delivered[delivery_tag]
+
+    def _flush(self):
+        """Flush dirty (acked/rejected) tags from the delivered map."""
+        dirty = self._dirty
+        delivered = self._delivered
+        while 1:
+            try:
+                dirty_tag = dirty.pop()
+            except KeyError:
+                break
+            delivered.pop(dirty_tag, None)
+
+    def ack(self, delivery_tag):
+        """Acknowledge message and remove from transactional state."""
+        self._quick_ack(delivery_tag)
+
+    def reject(self, delivery_tag, requeue=False):
+        """Remove from transactional state and requeue message."""
+        if requeue:
+            self.channel._restore_at_beginning(self._delivered[delivery_tag])
+        self._quick_ack(delivery_tag)
+
+    def restore_unacked(self):
+        """Restore all unacknowledged messages."""
+        self._flush()
+        delivered = self._delivered
+        errors = []
+        restore = self.channel._restore
+        pop_message = delivered.popitem
+
+        while delivered:
+            try:
+                _, message = pop_message()
+            except KeyError:  # pragma: no cover
+                break
+
+            try:
+                restore(message)
+            except BaseException as exc:
+                errors.append((exc, message))
+        delivered.clear()
+        return errors
+
+    def restore_unacked_once(self):
+        """Restores all unacknowledged messages at shutdown/gc collect.
+
+        Will only be done once for each instance.
+
+        """
+        self._on_collect.cancel()
+        self._flush()
+        state = self._delivered
+
+        if not self.restore_at_shutdown or not self.channel.do_restore:
+            return
+        if getattr(state, 'restored', None):
+            assert not state
+            return
+        try:
+            if state:
+                say('Restoring {0!r} unacknowledged message(s).',
+                    len(self._delivered))
+                unrestored = self.restore_unacked()
+
+                if unrestored:
+                    errors, messages = list(zip(*unrestored))
+                    say('UNABLE TO RESTORE {0} MESSAGES: {1}',
+                        len(errors), errors)
+                    emergency_dump_state(messages)
+        finally:
+            state.restored = True
+
+    def restore_visible(self, *args, **kwargs):
+        """Restore any pending unacknowledged messages for visibility_timeout
+        style implementations.
+
+        Optional: Currently only used by the Redis transport.
+
+        """
+        pass
+
+
+class Message(base.Message):
+
+    def __init__(self, channel, payload, **kwargs):
+        self._raw = payload
+        properties = payload['properties']
+        body = payload.get('body')
+        if body:
+            body = channel.decode_body(body, properties.get('body_encoding'))
+        kwargs.update({
+            'body': body,
+            'delivery_tag': properties['delivery_tag'],
+            'content_type': payload.get('content-type'),
+            'content_encoding': payload.get('content-encoding'),
+            'headers': payload.get('headers'),
+            'properties': properties,
+            'delivery_info': properties.get('delivery_info'),
+            'postencode': 'utf-8',
+        })
+        super(Message, self).__init__(channel, **kwdict(kwargs))
+
+    def serializable(self):
+        props = self.properties
+        body, _ = self.channel.encode_body(self.body,
+                                           props.get('body_encoding'))
+        headers = dict(self.headers)
+        # remove compression header
+        headers.pop('compression', None)
+        return {
+            'body': body,
+            'properties': props,
+            'content-type': self.content_type,
+            'content-encoding': self.content_encoding,
+            'headers': headers,
+        }
+
+
+class AbstractChannel(object):
+    """This is an abstract class defining the channel methods
+    you'd usually want to implement in a virtual channel.
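+
+    (a minimal transport implements at least :meth:`_get`, :meth:`_put`
+    and :meth:`_purge`; the Pyro transport earlier in this patch is
+    one example.)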
+
+    Do not subclass directly, but rather inherit from :class:`Channel`
+    instead.
+
+    """
+
+    def _get(self, queue, timeout=None):
+        """Get next message from `queue`."""
+        raise NotImplementedError('Virtual channels must implement _get')
+
+    def _put(self, queue, message):
+        """Put `message` onto `queue`."""
+        raise NotImplementedError('Virtual channels must implement _put')
+
+    def _purge(self, queue):
+        """Remove all messages from `queue`."""
+        raise NotImplementedError('Virtual channels must implement _purge')
+
+    def _size(self, queue):
+        """Return the number of messages in `queue` as an :class:`int`."""
+        return 0
+
+    def _delete(self, queue, *args, **kwargs):
+        """Delete `queue`.
+
+        This just purges the queue; if you need to do more you can
+        override this method.
+
+        """
+        self._purge(queue)
+
+    def _new_queue(self, queue, **kwargs):
+        """Create new queue.
+
+        Your transport can override this method if it needs
+        to do something whenever a new queue is declared.
+
+        """
+        pass
+
+    def _has_queue(self, queue, **kwargs):
+        """Verify that queue exists.
+
+        Should return :const:`True` if the queue exists or :const:`False`
+        otherwise.
+
+        """
+        return True
+
+    def _poll(self, cycle, timeout=None):
+        """Poll a list of queues for available messages."""
+        return cycle.get()
+
+
+class Channel(AbstractChannel, base.StdChannel):
+    """Virtual channel.
+
+    :param connection: The transport instance this channel is part of.
+
+    """
+    #: message class used.
+    Message = Message
+
+    #: QoS class used.
+    QoS = QoS
+
+    #: flag to restore unacked messages when channel
+    #: goes out of scope.
+    do_restore = True
+
+    #: mapping of exchange types and corresponding classes.
+    exchange_types = dict(STANDARD_EXCHANGE_TYPES)
+
+    #: flag set if the channel supports fanout exchanges.
+    supports_fanout = False
+
+    #: Binary <-> ASCII codecs.
+    codecs = {'base64': Base64()}
+
+    #: Default body encoding.
+    #: NOTE: ``transport_options['body_encoding']`` will override this value.
+    body_encoding = 'base64'
+
+    #: counter used to generate delivery tags for this channel.
+    _delivery_tags = count(1)
+
+    #: Optional queue where messages with no route are delivered.
+    #: Set by ``transport_options['deadletter_queue']``.
+    deadletter_queue = None
+
+    # List of options to transfer from :attr:`transport_options`.
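+    # e.g. ``Connection(transport_options={'body_encoding': None})``
+    # disables the default base64 encoding of message bodies.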
+ from_transport_options = ('body_encoding', 'deadletter_queue') + + def __init__(self, connection, **kwargs): + self.connection = connection + self._consumers = set() + self._cycle = None + self._tag_to_queue = {} + self._active_queues = [] + self._qos = None + self.closed = False + + # instantiate exchange types + self.exchange_types = dict( + (typ, cls(self)) for typ, cls in items(self.exchange_types) + ) + + try: + self.channel_id = self.connection._avail_channel_ids.pop() + except IndexError: + raise ResourceError( + 'No free channel ids, current={0}, channel_max={1}'.format( + len(self.connection.channels), + self.connection.channel_max), (20, 10), + ) + + topts = self.connection.client.transport_options + for opt_name in self.from_transport_options: + try: + setattr(self, opt_name, topts[opt_name]) + except KeyError: + pass + + def exchange_declare(self, exchange=None, type='direct', durable=False, + auto_delete=False, arguments=None, + nowait=False, passive=False): + """Declare exchange.""" + type = type or 'direct' + exchange = exchange or 'amq.%s' % type + if passive: + if exchange not in self.state.exchanges: + raise ChannelError( + 'NOT_FOUND - no exchange {0!r} in vhost {1!r}'.format( + exchange, self.connection.client.virtual_host or '/'), + (50, 10), 'Channel.exchange_declare', '404', + ) + return + try: + prev = self.state.exchanges[exchange] + if not self.typeof(exchange).equivalent(prev, exchange, type, + durable, auto_delete, + arguments): + raise NotEquivalentError(NOT_EQUIVALENT_FMT.format( + exchange, self.connection.client.virtual_host or '/')) + except KeyError: + self.state.exchanges[exchange] = { + 'type': type, + 'durable': durable, + 'auto_delete': auto_delete, + 'arguments': arguments or {}, + 'table': [], + } + + def exchange_delete(self, exchange, if_unused=False, nowait=False): + """Delete `exchange` and all its bindings.""" + for rkey, _, queue in self.get_table(exchange): + self.queue_delete(queue, if_unused=True, if_empty=True) + self.state.exchanges.pop(exchange, None) + + def queue_declare(self, queue=None, passive=False, **kwargs): + """Declare queue.""" + queue = queue or 'amq.gen-%s' % uuid() + if passive and not self._has_queue(queue, **kwargs): + raise ChannelError( + 'NOT_FOUND - no queue {0!r} in vhost {1!r}'.format( + queue, self.connection.client.virtual_host or '/'), + (50, 10), 'Channel.queue_declare', '404', + ) + else: + self._new_queue(queue, **kwargs) + return queue_declare_ok_t(queue, self._size(queue), 0) + + def queue_delete(self, queue, if_unused=False, if_empty=False, **kwargs): + """Delete queue.""" + if if_empty and self._size(queue): + return + try: + exchange, routing_key, arguments = self.state.bindings[queue] + except KeyError: + return + meta = self.typeof(exchange).prepare_bind( + queue, exchange, routing_key, arguments, + ) + self._delete(queue, exchange, *meta) + self.state.bindings.pop(queue, None) + + def after_reply_message_received(self, queue): + self.queue_delete(queue) + + def exchange_bind(self, destination, source='', routing_key='', + nowait=False, arguments=None): + raise NotImplementedError('transport does not support exchange_bind') + + def exchange_unbind(self, destination, source='', routing_key='', + nowait=False, arguments=None): + raise NotImplementedError('transport does not support exchange_unbind') + + def queue_bind(self, queue, exchange=None, routing_key='', + arguments=None, **kwargs): + """Bind `queue` to `exchange` with `routing key`.""" + if queue in self.state.bindings: + return + exchange = 
exchange or 'amq.direct' + table = self.state.exchanges[exchange].setdefault('table', []) + self.state.bindings[queue] = exchange, routing_key, arguments + meta = self.typeof(exchange).prepare_bind( + queue, exchange, routing_key, arguments, + ) + table.append(meta) + if self.supports_fanout: + self._queue_bind(exchange, *meta) + + def queue_unbind(self, queue, exchange=None, routing_key='', + arguments=None, **kwargs): + raise NotImplementedError('transport does not support queue_unbind') + + def list_bindings(self): + return ((queue, exchange, rkey) + for exchange in self.state.exchanges + for rkey, pattern, queue in self.get_table(exchange)) + + def queue_purge(self, queue, **kwargs): + """Remove all ready messages from queue.""" + return self._purge(queue) + + def _next_delivery_tag(self): + return uuid() + + def basic_publish(self, message, exchange, routing_key, **kwargs): + """Publish message.""" + message['body'], body_encoding = self.encode_body( + message['body'], self.body_encoding, + ) + props = message['properties'] + props.update( + body_encoding=body_encoding, + delivery_tag=self._next_delivery_tag(), + ) + props['delivery_info'].update( + exchange=exchange, + routing_key=routing_key, + ) + if exchange: + return self.typeof(exchange).deliver( + message, exchange, routing_key, **kwargs + ) + # anon exchange: routing_key is the destination queue + return self._put(routing_key, message, **kwargs) + + def basic_consume(self, queue, no_ack, callback, consumer_tag, **kwargs): + """Consume from `queue`""" + self._tag_to_queue[consumer_tag] = queue + self._active_queues.append(queue) + + def _callback(raw_message): + message = self.Message(self, raw_message) + if not no_ack: + self.qos.append(message, message.delivery_tag) + return callback(message) + + self.connection._callbacks[queue] = _callback + self._consumers.add(consumer_tag) + + self._reset_cycle() + + def basic_cancel(self, consumer_tag): + """Cancel consumer by consumer tag.""" + if consumer_tag in self._consumers: + self._consumers.remove(consumer_tag) + self._reset_cycle() + queue = self._tag_to_queue.pop(consumer_tag, None) + try: + self._active_queues.remove(queue) + except ValueError: + pass + self.connection._callbacks.pop(queue, None) + + def basic_get(self, queue, no_ack=False, **kwargs): + """Get message by direct access (synchronous).""" + try: + message = self.Message(self, self._get(queue)) + if not no_ack: + self.qos.append(message, message.delivery_tag) + return message + except Empty: + pass + + def basic_ack(self, delivery_tag): + """Acknowledge message.""" + self.qos.ack(delivery_tag) + + def basic_recover(self, requeue=False): + """Recover unacked messages.""" + if requeue: + return self.qos.restore_unacked() + raise NotImplementedError('Does not support recover(requeue=False)') + + def basic_reject(self, delivery_tag, requeue=False): + """Reject message.""" + self.qos.reject(delivery_tag, requeue=requeue) + + def basic_qos(self, prefetch_size=0, prefetch_count=0, + apply_global=False): + """Change QoS settings for this channel. + + Only `prefetch_count` is supported. 
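+        (``prefetch_size`` and ``apply_global`` are accepted for AMQP
+        compatibility, but ignored by this implementation.)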
+ + """ + self.qos.prefetch_count = prefetch_count + + def get_exchanges(self): + return list(self.state.exchanges) + + def get_table(self, exchange): + """Get table of bindings for `exchange`.""" + return self.state.exchanges[exchange]['table'] + + def typeof(self, exchange, default='direct'): + """Get the exchange type instance for `exchange`.""" + try: + type = self.state.exchanges[exchange]['type'] + except KeyError: + type = default + return self.exchange_types[type] + + def _lookup(self, exchange, routing_key, default=None): + """Find all queues matching `routing_key` for the given `exchange`. + + Must return the string `default` if no queues matched. + + """ + if default is None: + default = self.deadletter_queue + try: + R = self.typeof(exchange).lookup( + self.get_table(exchange), + exchange, routing_key, default, + ) + except KeyError: + R = [] + + if not R and default is not None: + warnings.warn(UndeliverableWarning(UNDELIVERABLE_FMT.format( + exchange=exchange, routing_key=routing_key)), + ) + self._new_queue(default) + R = [default] + return R + + def _restore(self, message): + """Redeliver message to its original destination.""" + delivery_info = message.delivery_info + message = message.serializable() + message['redelivered'] = True + for queue in self._lookup( + delivery_info['exchange'], delivery_info['routing_key']): + self._put(queue, message) + + def _restore_at_beginning(self, message): + return self._restore(message) + + def drain_events(self, timeout=None): + if self._consumers and self.qos.can_consume(): + if hasattr(self, '_get_many'): + return self._get_many(self._active_queues, timeout=timeout) + return self._poll(self.cycle, timeout=timeout) + raise Empty() + + def message_to_python(self, raw_message): + """Convert raw message to :class:`Message` instance.""" + if not isinstance(raw_message, self.Message): + return self.Message(self, payload=raw_message) + return raw_message + + def prepare_message(self, body, priority=None, content_type=None, + content_encoding=None, headers=None, properties=None): + """Prepare message data.""" + properties = properties or {} + info = properties.setdefault('delivery_info', {}) + info['priority'] = priority or 0 + + return {'body': body, + 'content-encoding': content_encoding, + 'content-type': content_type, + 'headers': headers or {}, + 'properties': properties or {}} + + def flow(self, active=True): + """Enable/disable message flow. + + :raises NotImplementedError: as flow + is not implemented by the base virtual implementation. 
+ + """ + raise NotImplementedError('virtual channels do not support flow.') + + def close(self): + """Close channel, cancel all consumers, and requeue unacked + messages.""" + if not self.closed: + self.closed = True + for consumer in list(self._consumers): + self.basic_cancel(consumer) + if self._qos: + self._qos.restore_unacked_once() + if self._cycle is not None: + self._cycle.close() + self._cycle = None + if self.connection is not None: + self.connection.close_channel(self) + self.exchange_types = None + + def encode_body(self, body, encoding=None): + if encoding: + return self.codecs.get(encoding).encode(body), encoding + return body, encoding + + def decode_body(self, body, encoding=None): + if encoding: + return self.codecs.get(encoding).decode(body) + return body + + def _reset_cycle(self): + self._cycle = FairCycle(self._get, self._active_queues, Empty) + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + + @property + def state(self): + """Broker state containing exchanges and bindings.""" + return self.connection.state + + @property + def qos(self): + """:class:`QoS` manager for this channel.""" + if self._qos is None: + self._qos = self.QoS(self) + return self._qos + + @property + def cycle(self): + if self._cycle is None: + self._reset_cycle() + return self._cycle + + +class Management(base.Management): + + def __init__(self, transport): + super(Management, self).__init__(transport) + self.channel = transport.client.channel() + + def get_bindings(self): + return [dict(destination=q, source=e, routing_key=r) + for q, e, r in self.channel.list_bindings()] + + def close(self): + self.channel.close() + + +class Transport(base.Transport): + """Virtual transport. + + :param client: :class:`~kombu.Connection` instance + + """ + Channel = Channel + Cycle = FairCycle + Management = Management + + #: :class:`BrokerState` containing declared exchanges and + #: bindings (set by constructor). + state = BrokerState() + + #: :class:`~kombu.transport.virtual.scheduling.FairCycle` instance + #: used to fairly drain events from channels (set by constructor). + cycle = None + + #: port number used when no port is specified. + default_port = None + + #: active channels. + channels = None + + #: queue/callback map. + _callbacks = None + + #: Time to sleep between unsuccessful polls. + polling_interval = 1.0 + + #: Max number of channels + channel_max = 65535 + + def __init__(self, client, **kwargs): + self.client = client + self.channels = [] + self._avail_channels = [] + self._callbacks = {} + self.cycle = self.Cycle(self._drain_channel, self.channels, Empty) + polling_interval = client.transport_options.get('polling_interval') + if polling_interval is not None: + self.polling_interval = polling_interval + self._avail_channel_ids = array( + ARRAY_TYPE_H, range(self.channel_max, 0, -1), + ) + + def create_channel(self, connection): + try: + return self._avail_channels.pop() + except IndexError: + channel = self.Channel(connection) + self.channels.append(channel) + return channel + + def close_channel(self, channel): + try: + self._avail_channel_ids.append(channel.channel_id) + try: + self.channels.remove(channel) + except ValueError: + pass + finally: + channel.connection = None + + def establish_connection(self): + # creates channel to verify connection. + # this channel is then used as the next requested channel. + # (returned by ``create_channel``). 
+ self._avail_channels.append(self.create_channel(self)) + return self # for drain events + + def close_connection(self, connection): + self.cycle.close() + for l in self._avail_channels, self.channels: + while l: + try: + channel = l.pop() + except (IndexError, KeyError): # pragma: no cover + pass + else: + channel.close() + + def drain_events(self, connection, timeout=None): + loop = 0 + time_start = monotonic() + get = self.cycle.get + polling_interval = self.polling_interval + while 1: + try: + item, channel = get(timeout=timeout) + except Empty: + if timeout and monotonic() - time_start >= timeout: + raise socket.timeout() + loop += 1 + if polling_interval is not None: + sleep(polling_interval) + else: + break + + message, queue = item + + if not queue or queue not in self._callbacks: + raise KeyError( + 'Message for queue {0!r} without consumers: {1}'.format( + queue, message)) + + self._callbacks[queue](message) + + def _drain_channel(self, channel, timeout=None): + return channel.drain_events(timeout=timeout) + + @property + def default_connection_params(self): + return {'port': self.default_port, 'hostname': 'localhost'} diff --git a/kombu/transport/virtual/exchange.py b/kombu/transport/virtual/exchange.py new file mode 100644 index 0000000..c788a26 --- /dev/null +++ b/kombu/transport/virtual/exchange.py @@ -0,0 +1,134 @@ +""" +kombu.transport.virtual.exchange +================================ + +Implementations of the standard exchanges defined +by the AMQ protocol (excluding the `headers` exchange). + +""" +from __future__ import absolute_import + +from kombu.utils import escape_regex + +import re + + +class ExchangeType(object): + """Implements the specifics for an exchange type. + + :param channel: AMQ Channel + + """ + type = None + + def __init__(self, channel): + self.channel = channel + + def lookup(self, table, exchange, routing_key, default): + """Lookup all queues matching `routing_key` in `exchange`. + + :returns: `default` if no queues matched. 
+ + """ + raise NotImplementedError('subclass responsibility') + + def prepare_bind(self, queue, exchange, routing_key, arguments): + """Return tuple of `(routing_key, regex, queue)` to be stored + for bindings to this exchange.""" + return routing_key, None, queue + + def equivalent(self, prev, exchange, type, + durable, auto_delete, arguments): + """Return true if `prev` and `exchange` is equivalent.""" + return (type == prev['type'] and + durable == prev['durable'] and + auto_delete == prev['auto_delete'] and + (arguments or {}) == (prev['arguments'] or {})) + + +class DirectExchange(ExchangeType): + """The `direct` exchange routes based on exact routing keys.""" + type = 'direct' + + def lookup(self, table, exchange, routing_key, default): + return [queue for rkey, _, queue in table + if rkey == routing_key] + + def deliver(self, message, exchange, routing_key, **kwargs): + _lookup = self.channel._lookup + _put = self.channel._put + for queue in _lookup(exchange, routing_key): + _put(queue, message, **kwargs) + + +class TopicExchange(ExchangeType): + """The `topic` exchange routes messages based on words separated by + dots, using wildcard characters ``*`` (any single word), and ``#`` + (one or more words).""" + type = 'topic' + + #: map of wildcard to regex conversions + wildcards = {'*': r'.*?[^\.]', + '#': r'.*?'} + + #: compiled regex cache + _compiled = {} + + def lookup(self, table, exchange, routing_key, default): + return [queue for rkey, pattern, queue in table + if self._match(pattern, routing_key)] + + def deliver(self, message, exchange, routing_key, **kwargs): + _lookup = self.channel._lookup + _put = self.channel._put + deadletter = self.channel.deadletter_queue + for queue in [q for q in _lookup(exchange, routing_key) + if q and q != deadletter]: + _put(queue, message, **kwargs) + + def prepare_bind(self, queue, exchange, routing_key, arguments): + return routing_key, self.key_to_pattern(routing_key), queue + + def key_to_pattern(self, rkey): + """Get the corresponding regex for any routing key.""" + return '^%s$' % ('\.'.join( + self.wildcards.get(word, word) + for word in escape_regex(rkey, '.#*').split('.') + )) + + def _match(self, pattern, string): + """Same as :func:`re.match`, except the regex is compiled and cached, + then reused on subsequent matches with the same pattern.""" + try: + compiled = self._compiled[pattern] + except KeyError: + compiled = self._compiled[pattern] = re.compile(pattern, re.U) + return compiled.match(string) + + +class FanoutExchange(ExchangeType): + """The `fanout` exchange implements broadcast messaging by delivering + copies of all messages to all queues bound to the exchange. + + To support fanout the virtual channel needs to store the table + as shared state. This requires that the `Channel.supports_fanout` + attribute is set to true, and the `Channel._queue_bind` and + `Channel.get_table` methods are implemented. See the redis backend + for an example implementation of these methods. + + """ + type = 'fanout' + + def lookup(self, table, exchange, routing_key, default): + return [queue for _, _, queue in table] + + def deliver(self, message, exchange, routing_key, **kwargs): + if self.channel.supports_fanout: + self.channel._put_fanout( + exchange, message, routing_key, **kwargs) + + +#: Map of standard exchange types and corresponding classes. 
+STANDARD_EXCHANGE_TYPES = {'direct': DirectExchange,
+                           'topic': TopicExchange,
+                           'fanout': FanoutExchange}
diff --git a/kombu/transport/virtual/scheduling.py b/kombu/transport/virtual/scheduling.py
new file mode 100644
index 0000000..bf92a3a
--- /dev/null
+++ b/kombu/transport/virtual/scheduling.py
@@ -0,0 +1,49 @@
+"""
+    kombu.transport.virtual.scheduling
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Consumer utilities.
+
+"""
+from __future__ import absolute_import
+
+from itertools import count
+
+
+class FairCycle(object):
+    """Consume from a set of resources, where each resource gets
+    an equal chance to be consumed from."""
+
+    def __init__(self, fun, resources, predicate=Exception):
+        self.fun = fun
+        self.resources = resources
+        self.predicate = predicate
+        self.pos = 0
+
+    def _next(self):
+        while 1:
+            try:
+                resource = self.resources[self.pos]
+                self.pos += 1
+                return resource
+            except IndexError:
+                self.pos = 0
+                if not self.resources:
+                    raise self.predicate()
+
+    def get(self, **kwargs):
+        for tried in count(0):  # for infinity
+            resource = self._next()
+
+            try:
+                return self.fun(resource, **kwargs), resource
+            except self.predicate:
+                if tried >= len(self.resources) - 1:
+                    raise
+
+    def close(self):
+        pass
+
+    def __repr__(self):
+        return '<FairCycle: {self.pos}/{size} {self.resources}>'.format(
+            self=self, size=len(self.resources))
diff --git a/kombu/transport/zmq.py b/kombu/transport/zmq.py
new file mode 100644
index 0000000..e6b8a48
--- /dev/null
+++ b/kombu/transport/zmq.py
@@ -0,0 +1,314 @@
+"""
+kombu.transport.zmq
+===================
+
+ZeroMQ transport.
+
+"""
+from __future__ import absolute_import
+
+import errno
+import os
+import socket
+
+try:
+    import zmq
+    from zmq import ZMQError
+except ImportError:
+    zmq = ZMQError = None  # noqa
+
+from kombu.five import Empty
+from kombu.log import get_logger
+from kombu.serialization import pickle
+from kombu.utils import cached_property
+from kombu.utils.eventio import poll, READ
+
+from .
import virtual + +logger = get_logger('kombu.transport.zmq') + +DEFAULT_PORT = 5555 +DEFAULT_HWM = 128 +DEFAULT_INCR = 1 + +dumps, loads = pickle.dumps, pickle.loads + + +class MultiChannelPoller(object): + eventflags = READ + + def __init__(self): + # active channels + self._channels = set() + # file descriptor -> channel map + self._fd_to_chan = {} + # poll implementation (epoll/kqueue/select) + self.poller = poll() + + def close(self): + for fd in self._fd_to_chan: + try: + self.poller.unregister(fd) + except KeyError: + pass + self._channels.clear() + self._fd_to_chan.clear() + self.poller = None + + def add(self, channel): + self._channels.add(channel) + + def discard(self, channel): + self._channels.discard(channel) + self._fd_to_chan.pop(channel.client.connection.fd, None) + + def _register(self, channel): + conn = channel.client.connection + self._fd_to_chan[conn.fd] = channel + self.poller.register(conn.fd, self.eventflags) + + def on_poll_start(self): + for channel in self._channels: + self._register(channel) + + def on_readable(self, fileno): + chan = self._fd_to_chan[fileno] + return chan.drain_events(), chan + + def get(self, timeout=None): + self.on_poll_start() + + events = self.poller.poll(timeout) + for fileno, _ in events or []: + return self.on_readable(fileno) + + raise Empty() + + @property + def fds(self): + return self._fd_to_chan + + +class Client(object): + + def __init__(self, uri='tcp://127.0.0.1', port=DEFAULT_PORT, + hwm=DEFAULT_HWM, swap_size=None, enable_sink=True, + context=None): + try: + scheme, parts = uri.split('://') + except ValueError: + scheme = 'tcp' + parts = uri + endpoints = parts.split(';') + self.port = port + + if scheme != 'tcp': + raise NotImplementedError('Currently only TCP can be used') + + self.context = context or zmq.Context.instance() + + if enable_sink: + self.sink = self.context.socket(zmq.PULL) + self.sink.bind('tcp://*:{0.port}'.format(self)) + else: + self.sink = None + + self.vent = self.context.socket(zmq.PUSH) + + if hasattr(zmq, 'SNDHWM'): + self.vent.setsockopt(zmq.SNDHWM, hwm) + else: + self.vent.setsockopt(zmq.HWM, hwm) + + if swap_size: + self.vent.setsockopt(zmq.SWAP, swap_size) + + for endpoint in endpoints: + if scheme == 'tcp' and ':' not in endpoint: + endpoint += ':' + str(DEFAULT_PORT) + + endpoint = ''.join([scheme, '://', endpoint]) + + self.connect(endpoint) + + def connect(self, endpoint): + self.vent.connect(endpoint) + + def get(self, queue=None, timeout=None): + sink = self.sink + try: + if timeout is not None: + prev_timeout, sink.RCVTIMEO = sink.RCVTIMEO, timeout + try: + return sink.recv() + finally: + sink.RCVTIMEO = prev_timeout + else: + return sink.recv() + except ZMQError as exc: + if exc.errno == zmq.EAGAIN: + raise socket.error(errno.EAGAIN, exc.strerror) + else: + raise + + def put(self, queue, message, **kwargs): + return self.vent.send(message) + + def close(self): + if self.sink and not self.sink.closed: + self.sink.close() + if not self.vent.closed: + self.vent.close() + + @property + def connection(self): + if self.sink: + return self.sink + return self.vent + + +class Channel(virtual.Channel): + Client = Client + + hwm = DEFAULT_HWM + swap_size = None + enable_sink = True + port_incr = DEFAULT_INCR + + from_transport_options = ( + virtual.Channel.from_transport_options + + ('hwm', 'swap_size', 'enable_sink', 'port_incr') + ) + + def __init__(self, *args, **kwargs): + super_ = super(Channel, self) + super_.__init__(*args, **kwargs) + + # Evaluate socket + self.client.connection.closed + + 
self.connection.cycle.add(self) + self.connection_errors = self.connection.connection_errors + + def _get(self, queue, timeout=None): + try: + return loads(self.client.get(queue, timeout)) + except socket.error as exc: + if exc.errno == errno.EAGAIN and timeout != 0: + raise Empty() + else: + raise + + def _put(self, queue, message, **kwargs): + self.client.put(queue, dumps(message, -1), **kwargs) + + def _purge(self, queue): + return 0 + + def _poll(self, cycle, timeout=None): + return cycle.get(timeout=timeout) + + def close(self): + if not self.closed: + self.connection.cycle.discard(self) + try: + self.__dict__['client'].close() + except KeyError: + pass + super(Channel, self).close() + + def _prepare_port(self, port): + return (port + self.channel_id - 1) * self.port_incr + + def _create_client(self): + conninfo = self.connection.client + port = self._prepare_port(conninfo.port or DEFAULT_PORT) + return self.Client(uri=conninfo.hostname or 'tcp://127.0.0.1', + port=port, + hwm=self.hwm, + swap_size=self.swap_size, + enable_sink=self.enable_sink, + context=self.connection.context) + + @cached_property + def client(self): + return self._create_client() + + +class Transport(virtual.Transport): + Channel = Channel + + can_parse_url = True + default_port = DEFAULT_PORT + driver_type = 'zeromq' + driver_name = 'zmq' + + connection_errors = virtual.Transport.connection_errors + (ZMQError, ) + + supports_ev = True + polling_interval = None + + def __init__(self, *args, **kwargs): + if zmq is None: + raise ImportError('The zmq library is not installed') + super(Transport, self).__init__(*args, **kwargs) + self.cycle = MultiChannelPoller() + + def driver_version(self): + return zmq.__version__ + + def register_with_event_loop(self, connection, loop): + cycle = self.cycle + cycle.poller = loop.poller + add_reader = loop.add_reader + on_readable = self.on_readable + + cycle_poll_start = cycle.on_poll_start + + def on_poll_start(): + cycle_poll_start() + [add_reader(fd, on_readable, fd) for fd in cycle.fds] + + loop.on_tick.add(on_poll_start) + + def on_readable(self, fileno): + self._handle_event(self.cycle.on_readable(fileno)) + + def drain_events(self, connection, timeout=None): + more_to_read = False + for channel in connection.channels: + try: + evt = channel.cycle.get(timeout=timeout) + except socket.error as exc: + if exc.errno == errno.EAGAIN: + continue + raise + else: + connection._handle_event((evt, channel)) + more_to_read = True + if not more_to_read: + raise socket.error(errno.EAGAIN, os.strerror(errno.EAGAIN)) + + def _handle_event(self, evt): + item, channel = evt + message, queue = item + if not queue or queue not in self._callbacks: + raise KeyError( + 'Message for queue {0!r} without consumers: {1}'.format( + queue, message)) + self._callbacks[queue](message) + + def establish_connection(self): + self.context.closed + return super(Transport, self).establish_connection() + + def close_connection(self, connection): + super(Transport, self).close_connection(connection) + try: + connection.__dict__['context'].term() + except KeyError: + pass + + @cached_property + def context(self): + return zmq.Context(1) diff --git a/kombu/transport/zookeeper.py b/kombu/transport/zookeeper.py new file mode 100644 index 0000000..2d1c8ab --- /dev/null +++ b/kombu/transport/zookeeper.py @@ -0,0 +1,188 @@ +""" +kombu.transport.zookeeper +========================= + +Zookeeper transport. + +:copyright: (c) 2010 - 2013 by Mahendra M. +:license: BSD, see LICENSE for more details. 
+
+**Synopsis**
+
+Connects to a zookeeper node as <server>:<port>/<vhost>.
+The <vhost> becomes the base for all the other znodes. So we can use
+it like a vhost.
+
+This uses the built-in kazoo recipe for queues.
+
+**References**
+
+- https://zookeeper.apache.org/doc/trunk/recipes.html#sc_recipes_Queues
+- https://kazoo.readthedocs.org/en/latest/api/recipe/queue.html
+
+**Limitations**
+
+This queue does not offer reliable consumption. An entry is removed from
+the queue prior to being processed. So if an error occurs, the consumer
+has to re-queue the item or it will be lost.
+"""
+from __future__ import absolute_import
+
+import os
+import socket
+
+from anyjson import loads, dumps
+
+from kombu.five import Empty
+from kombu.utils.encoding import bytes_to_str
+
+from . import virtual
+
+MAX_PRIORITY = 9
+
+try:
+    import kazoo
+    from kazoo.client import KazooClient
+    from kazoo.recipe.queue import Queue
+
+    KZ_CONNECTION_ERRORS = (
+        kazoo.exceptions.SystemErrorException,
+        kazoo.exceptions.ConnectionLossException,
+        kazoo.exceptions.MarshallingErrorException,
+        kazoo.exceptions.UnimplementedException,
+        kazoo.exceptions.OperationTimeoutException,
+        kazoo.exceptions.NoAuthException,
+        kazoo.exceptions.InvalidACLException,
+        kazoo.exceptions.AuthFailedException,
+        kazoo.exceptions.SessionExpiredException,
+    )
+
+    KZ_CHANNEL_ERRORS = (
+        kazoo.exceptions.RuntimeInconsistencyException,
+        kazoo.exceptions.DataInconsistencyException,
+        kazoo.exceptions.BadArgumentsException,
+        kazoo.exceptions.MarshallingErrorException,
+        kazoo.exceptions.UnimplementedException,
+        kazoo.exceptions.OperationTimeoutException,
+        kazoo.exceptions.ApiErrorException,
+        kazoo.exceptions.NoNodeException,
+        kazoo.exceptions.NoAuthException,
+        kazoo.exceptions.NodeExistsException,
+        kazoo.exceptions.NoChildrenForEphemeralsException,
+        kazoo.exceptions.NotEmptyException,
+        kazoo.exceptions.SessionExpiredException,
+        kazoo.exceptions.InvalidCallbackException,
+        socket.error,
+    )
+except ImportError:
+    kazoo = None  # noqa
+    KZ_CONNECTION_ERRORS = KZ_CHANNEL_ERRORS = ()  # noqa
+
+DEFAULT_PORT = 2181
+
+__author__ = 'Mahendra M <mahendra.m@gmail.com>'
+
+
+class Channel(virtual.Channel):
+
+    _client = None
+    _queues = {}
+
+    def _get_path(self, queue_name):
+        return os.path.join(self.vhost, queue_name)
+
+    def _get_queue(self, queue_name):
+        queue = self._queues.get(queue_name, None)
+
+        if queue is None:
+            queue = Queue(self.client, self._get_path(queue_name))
+            self._queues[queue_name] = queue
+
+            # Ensure that the queue is created
+            len(queue)
+
+        return queue
+
+    def _put(self, queue, message, **kwargs):
+        try:
+            priority = message['properties']['delivery_info']['priority']
+        except KeyError:
+            priority = 0
+
+        queue = self._get_queue(queue)
+        queue.put(dumps(message), priority=(MAX_PRIORITY - priority))
+
+    def _get(self, queue):
+        queue = self._get_queue(queue)
+        msg = queue.get()
+
+        if msg is None:
+            raise Empty()
+
+        return loads(bytes_to_str(msg))
+
+    def _purge(self, queue):
+        count = 0
+        queue = self._get_queue(queue)
+
+        while True:
+            msg = queue.get()
+            if msg is None:
+                break
+            count += 1
+
+        return count
+
+    def _delete(self, queue, *args, **kwargs):
+        if self._has_queue(queue):
+            self._purge(queue)
+            self.client.delete(self._get_path(queue))
+
+    def _size(self, queue):
+        queue = self._get_queue(queue)
+        return len(queue)
+
+    def _new_queue(self, queue, **kwargs):
+        if not self._has_queue(queue):
+            queue = self._get_queue(queue)
+
+    def _has_queue(self, queue):
+        return self.client.exists(self._get_path(queue)) is not None
+
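Given the synopsis above, a connection sketch (the host, port and vhost are illustrative placeholders, and a running ZooKeeper plus the kazoo library are assumed)::

    from kombu import Connection

    # '/myvhost' becomes the base znode for all queues on this connection.
    with Connection('zookeeper://localhost:2181/myvhost') as conn:
        queue = conn.SimpleQueue('test')
        queue.put({'hello': 'world'})
        print(queue.get(timeout=1).payload)  # {'hello': 'world'}

+    def _open(self):
+        conninfo = 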
self.connection.client
+        port = conninfo.port or DEFAULT_PORT
+        conn_str = '%s:%s' % (conninfo.hostname, port)
+        self.vhost = os.path.join('/', conninfo.virtual_host[0:-1])
+
+        conn = KazooClient(conn_str)
+        conn.start()
+        return conn
+
+    @property
+    def client(self):
+        if self._client is None:
+            self._client = self._open()
+        return self._client
+
+
+class Transport(virtual.Transport):
+    Channel = Channel
+    polling_interval = 1
+    default_port = DEFAULT_PORT
+    connection_errors = (
+        virtual.Transport.connection_errors + KZ_CONNECTION_ERRORS
+    )
+    channel_errors = (
+        virtual.Transport.channel_errors + KZ_CHANNEL_ERRORS
+    )
+    driver_type = 'zookeeper'
+    driver_name = 'kazoo'
+
+    def __init__(self, *args, **kwargs):
+        if kazoo is None:
+            raise ImportError('The kazoo library is not installed')
+
+        super(Transport, self).__init__(*args, **kwargs)
+
+    def driver_version(self):
+        return kazoo.__version__
diff --git a/kombu/utils/__init__.py b/kombu/utils/__init__.py
new file mode 100644
index 0000000..0745ddf
--- /dev/null
+++ b/kombu/utils/__init__.py
@@ -0,0 +1,450 @@
+"""
+kombu.utils
+===========
+
+Internal utilities.
+
+"""
+from __future__ import absolute_import, print_function
+
+import importlib
+import numbers
+import random
+import sys
+
+from contextlib import contextmanager
+from itertools import count, repeat
+from functools import wraps
+from time import sleep
+from uuid import UUID, uuid4 as _uuid4, _uuid_generate_random
+
+from kombu.five import items, reraise, string_t
+
+from .encoding import default_encode, safe_repr as _safe_repr
+
+try:
+    import ctypes
+except ImportError:
+    ctypes = None  # noqa
+
+try:
+    from io import UnsupportedOperation
+    FILENO_ERRORS = (AttributeError, ValueError, UnsupportedOperation)
+except ImportError:  # pragma: no cover
+    # Py2
+    FILENO_ERRORS = (AttributeError, ValueError)  # noqa
+
+
+__all__ = ['EqualityDict', 'say', 'uuid', 'kwdict', 'maybe_list',
+           'fxrange', 'fxrangemax', 'retry_over_time',
+           'emergency_dump_state', 'cached_property',
+           'reprkwargs', 'reprcall', 'nested', 'fileno', 'maybe_fileno']
+
+
+def symbol_by_name(name, aliases={}, imp=None, package=None,
+                   sep='.', default=None, **kwargs):
+    """Get symbol by qualified name.
+
+    The name should be the full dot-separated path to the class::
+
+        modulename.ClassName
+
+    Example::
+
+        celery.concurrency.processes.TaskPool
+                                     ^- class name
+
+    or using ':' to separate module and symbol::
+
+        celery.concurrency.processes:TaskPool
+
+    If `aliases` is provided, a dict containing short name/long name
+    mappings, the name is looked up in the aliases first.
+
+    Examples:
+
+        >>> symbol_by_name('celery.concurrency.processes.TaskPool')
+        <class 'celery.concurrency.processes.TaskPool'>
+
+        >>> symbol_by_name('default', {
+        ...     'default': 'celery.concurrency.processes.TaskPool'})
+        <class 'celery.concurrency.processes.TaskPool'>
+
+        # Does not try to look up non-string names.
+        >>> from celery.concurrency.processes import TaskPool
+        >>> symbol_by_name(TaskPool) is TaskPool
+        True
+
+    """
+    if imp is None:
+        imp = importlib.import_module
+
+    if not isinstance(name, string_t):
+        return name  # already a class
+
+    name = aliases.get(name) or name
+    sep = ':' if ':' in name else sep
+    module_name, _, cls_name = name.rpartition(sep)
+    if not module_name:
+        cls_name, module_name = None, package if package else cls_name
+    try:
+        try:
+            module = imp(module_name, package=package, **kwargs)
+        except ValueError as exc:
+            reraise(ValueError,
+                    ValueError("Couldn't import {0!r}: {1}".format(name, exc)),
+                    sys.exc_info()[2])
+        return getattr(module, cls_name) if cls_name else module
+    except (ImportError, AttributeError):
+        if default is None:
+            raise
+        return default
+
+
+class HashedSeq(list):
+    """Type used for hash() to make sure the hash is not generated
+    multiple times."""
+    __slots__ = 'hashvalue'
+
+    def __init__(self, *seq):
+        self[:] = seq
+        self.hashvalue = hash(seq)
+
+    def __hash__(self):
+        return self.hashvalue
+
+
+def eqhash(o):
+    try:
+        return o.__eqhash__()
+    except AttributeError:
+        return hash(o)
+
+
+class EqualityDict(dict):
+
+    def __getitem__(self, key):
+        h = eqhash(key)
+        if h not in self:
+            return self.__missing__(key)
+        return dict.__getitem__(self, h)
+
+    def __setitem__(self, key, value):
+        return dict.__setitem__(self, eqhash(key), value)
+
+    def __delitem__(self, key):
+        return dict.__delitem__(self, eqhash(key))
+
+
+def say(m, *fargs, **fkwargs):
+    print(str(m).format(*fargs, **fkwargs), file=sys.stderr)
+
+
+def uuid4():
+    # Workaround for http://bugs.python.org/issue4607
+    if ctypes and _uuid_generate_random:  # pragma: no cover
+        buffer = ctypes.create_string_buffer(16)
+        _uuid_generate_random(buffer)
+        return UUID(bytes=buffer.raw)
+    return _uuid4()
+
+
+def uuid():
+    """Generate a unique id, having - hopefully - a very small chance of
+    collision.
+
+    For now this is provided by :func:`uuid.uuid4`.
+    """
+    return str(uuid4())
+gen_unique_id = uuid
+
+
+if sys.version_info >= (2, 6, 5):
+
+    def kwdict(kwargs):
+        return kwargs
+else:
+    def kwdict(kwargs):  # pragma: no cover  # noqa
+        """Make sure keyword arguments are not in Unicode.
+
+        This should be fixed in newer Python versions,
+        see: http://bugs.python.org/issue4978.
+
+        """
+        return dict((key.encode('utf-8'), value)
+                    for key, value in items(kwargs))
+
+
+def maybe_list(v):
+    if v is None:
+        return []
+    if hasattr(v, '__iter__'):
+        return v
+    return [v]
+
+
+def fxrange(start=1.0, stop=None, step=1.0, repeatlast=False):
+    cur = start * 1.0
+    while 1:
+        if not stop or cur <= stop:
+            yield cur
+            cur += step
+        else:
+            if not repeatlast:
+                break
+            yield cur - step
+
+
+def fxrangemax(start=1.0, stop=None, step=1.0, max=100.0):
+    sum_, cur = 0, start * 1.0
+    while 1:
+        if sum_ >= max:
+            break
+        yield cur
+        if stop:
+            cur = min(cur + step, stop)
+        else:
+            cur += step
+        sum_ += cur
+
+
+def retry_over_time(fun, catch, args=[], kwargs={}, errback=None,
+                    max_retries=None, interval_start=2, interval_step=2,
+                    interval_max=30, callback=None):
+    """Retry the function over and over until max retries is exceeded.
+
+    For each retry we sleep for a while before we try again; this interval
+    is increased for every retry until the max seconds is reached.
+
+    :param fun: The function to try.
+    :param catch: Exceptions to catch, can be either tuple or a single
+        exception class.
+    :keyword args: Positional arguments passed on to the function.
+    :keyword kwargs: Keyword arguments passed on to the function.
+    :keyword errback: Callback for when an exception in ``catch`` is raised.
+        The callback must take three arguments: ``exc``, ``interval_range``
+        and ``retries``, where ``exc`` is the exception instance,
+        ``interval_range`` is an iterator of the sleep intervals, and
+        ``retries`` is the number of previous retries. It must return the
+        time in seconds to sleep next.
+    :keyword max_retries: Maximum number of retries before we give up.
+        If this is not set, we will retry forever.
+    :keyword interval_start: How long (in seconds) we start sleeping between
+        retries.
+    :keyword interval_step: By how much the interval is increased for each
+        retry.
+    :keyword interval_max: Maximum number of seconds to sleep between retries.
+
+    """
+    retries = 0
+    interval_range = fxrange(interval_start,
+                             interval_max + interval_start,
+                             interval_step, repeatlast=True)
+    for retries in count():
+        try:
+            return fun(*args, **kwargs)
+        except catch as exc:
+            if max_retries and retries >= max_retries:
+                raise
+            if callback:
+                callback()
+            tts = float(errback(exc, interval_range, retries) if errback
+                        else next(interval_range))
+            if tts:
+                for _ in range(int(tts)):
+                    if callback:
+                        callback()
+                    sleep(1.0)
+                # sleep remainder after int truncation above.
+                sleep(abs(int(tts) - tts))
+
+
+def emergency_dump_state(state, open_file=open, dump=None):
+    from pprint import pformat
+    from tempfile import mktemp
+
+    if dump is None:
+        import pickle
+        dump = pickle.dump
+    persist = mktemp()
+    say('EMERGENCY DUMP STATE TO FILE -> {0} <-', persist)
+    fh = open_file(persist, 'w')
+    try:
+        try:
+            dump(state, fh, protocol=0)
+        except Exception as exc:
+            say('Cannot pickle state: {0!r}. Fallback to pformat.', exc)
+            fh.write(default_encode(pformat(state)))
+    finally:
+        fh.flush()
+        fh.close()
+    return persist
+
+
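A quick illustration of the retry loop above, with zero-length intervals so it runs instantly (the flaky callable and its counter are illustrative only)::

    from kombu.utils import retry_over_time

    state = {'calls': 0}

    def flaky():
        state['calls'] += 1
        if state['calls'] < 3:
            raise IOError('not ready yet')
        return 'ok'

    # Fails twice, sleeps 0s between tries, succeeds on the third call.
    print(retry_over_time(flaky, (IOError,), max_retries=5,
                          interval_start=0, interval_step=0))  # 'ok'

+class cached_property(object):
+    """Property descriptor that caches the return value
+    of the get function.
+
+    *Examples*
+
+    .. 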
code-block:: python
+
+        @cached_property
+        def connection(self):
+            return Connection()
+
+        @connection.setter  # Prepares stored value
+        def connection(self, value):
+            if value is None:
+                raise TypeError('Connection must be a connection')
+            return value
+
+        @connection.deleter
+        def connection(self, value):
+            # Additional action to do at del(self.attr)
+            if value is not None:
+                print('Connection {0!r} deleted'.format(value))
+
+    """
+
+    def __init__(self, fget=None, fset=None, fdel=None, doc=None):
+        self.__get = fget
+        self.__set = fset
+        self.__del = fdel
+        self.__doc__ = doc or fget.__doc__
+        self.__name__ = fget.__name__
+        self.__module__ = fget.__module__
+
+    def __get__(self, obj, type=None):
+        if obj is None:
+            return self
+        try:
+            return obj.__dict__[self.__name__]
+        except KeyError:
+            value = obj.__dict__[self.__name__] = self.__get(obj)
+            return value
+
+    def __set__(self, obj, value):
+        if obj is None:
+            return self
+        if self.__set is not None:
+            value = self.__set(obj, value)
+        obj.__dict__[self.__name__] = value
+
+    def __delete__(self, obj):
+        if obj is None:
+            return self
+        try:
+            value = obj.__dict__.pop(self.__name__)
+        except KeyError:
+            pass
+        else:
+            if self.__del is not None:
+                self.__del(obj, value)
+
+    def setter(self, fset):
+        return self.__class__(self.__get, fset, self.__del)
+
+    def deleter(self, fdel):
+        return self.__class__(self.__get, self.__set, fdel)
+
+
+def reprkwargs(kwargs, sep=', ', fmt='{0}={1}'):
+    return sep.join(fmt.format(k, _safe_repr(v)) for k, v in items(kwargs))
+
+
+def reprcall(name, args=(), kwargs={}, sep=', '):
+    return '{0}({1}{2}{3})'.format(
+        name, sep.join(map(_safe_repr, args or ())),
+        (args and kwargs) and sep or '',
+        reprkwargs(kwargs, sep),
+    )
+
+
+@contextmanager
+def nested(*managers):  # pragma: no cover
+    # flake8: noqa
+    """Combine multiple context managers into a single nested
+    context manager."""
+    exits = []
+    vars = []
+    exc = (None, None, None)
+    try:
+        try:
+            for mgr in managers:
+                exit = mgr.__exit__
+                enter = mgr.__enter__
+                vars.append(enter())
+                exits.append(exit)
+            yield vars
+        except:
+            exc = sys.exc_info()
+        finally:
+            while exits:
+                exit = exits.pop()
+                try:
+                    if exit(*exc):
+                        exc = (None, None, None)
+                except:
+                    exc = sys.exc_info()
+            if exc != (None, None, None):
+                # Don't rely on sys.exc_info() still containing
+                # the right information. Another exception may
+                # have been raised and caught by an exit method
+                reraise(exc[0], exc[1], exc[2])
+    finally:
+        del(exc)
+
+
+def shufflecycle(it):
+    it = list(it)  # don't modify callers list
+    shuffle = random.shuffle
+    for _ in repeat(None):
+        shuffle(it)
+        yield it[0]
+
+
+def entrypoints(namespace):
+    try:
+        from pkg_resources import iter_entry_points
+    except ImportError:
+        return iter([])
+    return ((ep, ep.load()) for ep in iter_entry_points(namespace))
+
+
+class ChannelPromise(object):
+
+    def __init__(self, contract):
+        self.__contract__ = contract
+
+    def __call__(self):
+        try:
+            return self.__value__
+        except AttributeError:
+            value = self.__value__ = self.__contract__()
+            return value
+
+    def __repr__(self):
+        try:
+            return repr(self.__value__)
+        except AttributeError:
+            return '<promise: 0x{0:x}>'.format(id(self.__contract__))
+
+
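`ChannelPromise` defers creating a value until the first call and then caches it; a minimal sketch (the `object()` factory stands in for a real channel contract)::

    from kombu.utils import ChannelPromise

    promise = ChannelPromise(lambda: object())  # nothing evaluated yet
    print(repr(promise))        # <promise: 0x...>
    value = promise()           # contract runs exactly once
    print(promise() is value)   # True -- cached on the promise

+def escape_regex(p, white=''):
+    # what's up with re.escape? 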
that code must be neglected or something
+    return ''.join(c if c.isalnum() or c in white
+                   else ('\\000' if c == '\000' else '\\' + c)
+                   for c in p)
+
+
+def fileno(f):
+    if isinstance(f, numbers.Integral):
+        return f
+    return f.fileno()
+
+
+def maybe_fileno(f):
+    """Get object fileno, or :const:`None` if not defined."""
+    try:
+        return fileno(f)
+    except FILENO_ERRORS:
+        pass
diff --git a/kombu/utils/amq_manager.py b/kombu/utils/amq_manager.py
new file mode 100644
index 0000000..7da5490
--- /dev/null
+++ b/kombu/utils/amq_manager.py
@@ -0,0 +1,18 @@
+from __future__ import absolute_import
+
+
+def get_manager(client, hostname=None, port=None, userid=None,
+                password=None):
+    import pyrabbit
+    opt = client.transport_options.get
+
+    def get(name, val, default):
+        return (val if val is not None
+                else opt('manager_%s' % name)
+                or getattr(client, name, None) or default)
+
+    host = get('hostname', hostname, 'localhost')
+    port = port if port is not None else opt('manager_port', 15672)
+    userid = get('userid', userid, 'guest')
+    password = get('password', password, 'guest')
+    return pyrabbit.Client('%s:%s' % (host, port), userid, password)
diff --git a/kombu/utils/compat.py b/kombu/utils/compat.py
new file mode 100644
index 0000000..d0c3e67
--- /dev/null
+++ b/kombu/utils/compat.py
@@ -0,0 +1,60 @@
+"""
+kombu.utils.compat
+==================
+
+Helps compatibility with older Python versions.
+
+"""
+from __future__ import absolute_import
+
+
+# ############# timedelta_seconds() -> delta.total_seconds ###################
+from datetime import timedelta
+
+HAVE_TIMEDELTA_TOTAL_SECONDS = hasattr(timedelta, 'total_seconds')
+
+
+if HAVE_TIMEDELTA_TOTAL_SECONDS:  # pragma: no cover
+
+    def timedelta_seconds(delta):
+        """Convert :class:`datetime.timedelta` to seconds.
+
+        Doesn't account for negative values.
+
+        """
+        return max(delta.total_seconds(), 0)
+
+else:  # pragma: no cover
+
+    def timedelta_seconds(delta):  # noqa
+        """Convert :class:`datetime.timedelta` to seconds.
+
+        Doesn't account for negative values.
+
+        """
+        if delta.days < 0:
+            return 0
+        return delta.days * 86400 + delta.seconds + (delta.microseconds / 10e5)
+
+# ############# socket.error.errno ###########################################
+
+
+def get_errno(exc):
+    """:exc:`socket.error` and :exc:`IOError` first got
+    the ``.errno`` attribute in Py2.7"""
+    try:
+        return exc.errno
+    except AttributeError:
+        try:
+            # e.args = (errno, reason)
+            if isinstance(exc.args, tuple) and len(exc.args) == 2:
+                return exc.args[0]
+        except AttributeError:
+            pass
+    return 0
+
+# ############# collections.OrderedDict ######################################
+try:
+    from collections import OrderedDict
+except ImportError:
+    from ordereddict import OrderedDict  # noqa
diff --git a/kombu/utils/debug.py b/kombu/utils/debug.py
new file mode 100644
index 0000000..8d08115
--- /dev/null
+++ b/kombu/utils/debug.py
@@ -0,0 +1,65 @@
+"""
+kombu.utils.debug
+=================
+
+Debugging support.
+
+"""
+from __future__ import absolute_import
+
+import logging
+
+from functools import wraps
+
+from kombu.five import items
+from kombu.log import get_logger
+
+__all__ = ['setup_logging', 'Logwrapped']
+
+
+def setup_logging(loglevel=logging.DEBUG, loggers=['kombu.connection',
+                                                   'kombu.channel']):
+    for logger in loggers:
+        l = get_logger(logger)
+        l.addHandler(logging.StreamHandler())
+        l.setLevel(loglevel)
+
+
+class Logwrapped(object):
+    __ignore = ('__enter__', '__exit__')
+
+    def __init__(self, instance, logger=None, ident=None):
+        self.instance = instance
+        self.logger = get_logger(logger)
+        self.ident = ident
+
+    def __getattr__(self, key):
+        meth = getattr(self.instance, key)
+
+        if not callable(meth) or key in self.__ignore:
+            return meth
+
+        @wraps(meth)
+        def __wrapped(*args, **kwargs):
+            info = ''
+            if self.ident:
+                info += self.ident.format(self.instance)
+            info += '{0.__name__}('.format(meth)
+            if args:
+                info += ', '.join(map(repr, args))
+            if kwargs:
+                if args:
+                    info += ', '
+                info += ', '.join('{k}={v!r}'.format(k=key, v=value)
+                                  for key, value in items(kwargs))
+            info += ')'
+            self.logger.debug(info)
+            return meth(*args, **kwargs)
+
+        return __wrapped
+
+    def __repr__(self):
+        return repr(self.instance)
+
+    def __dir__(self):
+        return dir(self.instance)
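`Logwrapped` proxies an object and logs every method call before delegating. A minimal sketch (the in-memory ``memory://`` transport keeps it self-contained)::

    from kombu import Connection
    from kombu.utils.debug import Logwrapped, setup_logging

    setup_logging(loggers=['kombu.connection'])
    conn = Logwrapped(Connection('memory://'), 'kombu.connection')
    conn.connect()  # emits a DEBUG record such as "connect()" first

diff --git a/kombu/utils/encoding.py b/kombu/utils/encoding.py
new file mode 100644
index 0000000..d054257
--- /dev/null
+++ b/kombu/utils/encoding.py
@@ -0,0 +1,129 @@
+# -*- coding: utf-8 -*-
+"""
+kombu.utils.encoding
+~~~~~~~~~~~~~~~~~~~~~
+
+Utilities to encode text, and to safely emit text from running
+applications without crashing with the infamous :exc:`UnicodeDecodeError`
+exception.
+
+"""
+from __future__ import absolute_import
+
+import sys
+import traceback
+
+from kombu.five import text_t
+
+is_py3k = sys.version_info >= (3, 0)
+
+#: safe_str takes encoding from this file by default.
+#: :func:`set_default_encoding_file` can be used to set the
+#: default output file.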
+default_encoding_file = None
+
+
+def set_default_encoding_file(file):
+    global default_encoding_file
+    default_encoding_file = file
+
+
+def get_default_encoding_file():
+    return default_encoding_file
+
+
+if sys.platform.startswith('java'):  # pragma: no cover
+
+    def default_encoding(file=None):
+        return 'utf-8'
+else:
+
+    def default_encoding(file=None):  # noqa
+        file = file or get_default_encoding_file()
+        return getattr(file, 'encoding', None) or sys.getfilesystemencoding()
+
+if is_py3k:  # pragma: no cover
+
+    def str_to_bytes(s):
+        if isinstance(s, str):
+            return s.encode()
+        return s
+
+    def bytes_to_str(s):
+        if isinstance(s, bytes):
+            return s.decode()
+        return s
+
+    def from_utf8(s, *args, **kwargs):
+        return s
+
+    def ensure_bytes(s):
+        if not isinstance(s, bytes):
+            return str_to_bytes(s)
+        return s
+
+    def default_encode(obj):
+        return obj
+
+    str_t = str
+
+else:
+
+    def str_to_bytes(s):  # noqa
+        if isinstance(s, unicode):
+            return s.encode()
+        return s
+
+    def bytes_to_str(s):  # noqa
+        return s
+
+    def from_utf8(s, *args, **kwargs):  # noqa
+        return s.encode('utf-8', *args, **kwargs)
+
+    def default_encode(obj, file=None):  # noqa
+        return unicode(obj, default_encoding(file))
+
+    str_t = unicode
+    ensure_bytes = str_to_bytes
+
+
+try:
+    bytes_t = bytes
+except NameError:  # pragma: no cover
+    bytes_t = str  # noqa
+
+
+def safe_str(s, errors='replace'):
+    s = bytes_to_str(s)
+    if not isinstance(s, (text_t, bytes)):
+        return safe_repr(s, errors)
+    return _safe_str(s, errors)
+
+
+if is_py3k:
+
+    def _safe_str(s, errors='replace', file=None):
+        if isinstance(s, str):
+            return s
+        try:
+            return str(s)
+        except Exception as exc:
+            return '<Unrepresentable {0!r}: {1!r} {2!r}>'.format(
+                type(s), exc, '\n'.join(traceback.format_stack()))
+else:
+    def _safe_str(s, errors='replace', file=None):  # noqa
+        encoding = default_encoding(file)
+        try:
+            if isinstance(s, unicode):
+                return s.encode(encoding, errors)
+            return unicode(s, encoding, errors)
+        except Exception as exc:
+            return '<Unrepresentable {0!r}: {1!r} {2!r}>'.format(
+                type(s), exc, '\n'.join(traceback.format_stack()))
+
+
+def safe_repr(o, errors='replace'):
+    try:
+        return repr(o)
+    except Exception:
+        return _safe_str(o, errors)
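The helpers above are easiest to see on Python 3, where `str_to_bytes` encodes and `bytes_to_str` decodes; `safe_repr` is designed never to raise. A minimal sketch::

    from kombu.utils.encoding import bytes_to_str, safe_repr, str_to_bytes

    payload = str_to_bytes('hello')  # b'hello' on Python 3
    print(bytes_to_str(payload))     # 'hello'
    print(safe_repr(object()))       # falls back safely instead of raising

diff --git a/kombu/utils/eventio.py b/kombu/utils/eventio.py
new file mode 100644
index 0000000..e4961cd
--- /dev/null
+++ b/kombu/utils/eventio.py
@@ -0,0 +1,265 @@
+"""
+kombu.utils.eventio
+===================
+
+Evented IO support for multiple platforms.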
+
+"""
+from __future__ import absolute_import
+
+import errno
+import select as __select__
+import socket
+
+from numbers import Integral
+
+_selectf = __select__.select
+_selecterr = __select__.error
+epoll = getattr(__select__, 'epoll', None)
+kqueue = getattr(__select__, 'kqueue', None)
+kevent = getattr(__select__, 'kevent', None)
+KQ_EV_ADD = getattr(__select__, 'KQ_EV_ADD', 1)
+KQ_EV_DELETE = getattr(__select__, 'KQ_EV_DELETE', 2)
+KQ_EV_ENABLE = getattr(__select__, 'KQ_EV_ENABLE', 4)
+KQ_EV_CLEAR = getattr(__select__, 'KQ_EV_CLEAR', 32)
+KQ_EV_ERROR = getattr(__select__, 'KQ_EV_ERROR', 16384)
+KQ_EV_EOF = getattr(__select__, 'KQ_EV_EOF', 32768)
+KQ_FILTER_READ = getattr(__select__, 'KQ_FILTER_READ', -1)
+KQ_FILTER_WRITE = getattr(__select__, 'KQ_FILTER_WRITE', -2)
+KQ_FILTER_AIO = getattr(__select__, 'KQ_FILTER_AIO', -3)
+KQ_FILTER_VNODE = getattr(__select__, 'KQ_FILTER_VNODE', -4)
+KQ_FILTER_PROC = getattr(__select__, 'KQ_FILTER_PROC', -5)
+KQ_FILTER_SIGNAL = getattr(__select__, 'KQ_FILTER_SIGNAL', -6)
+KQ_FILTER_TIMER = getattr(__select__, 'KQ_FILTER_TIMER', -7)
+KQ_NOTE_LOWAT = getattr(__select__, 'KQ_NOTE_LOWAT', 1)
+KQ_NOTE_DELETE = getattr(__select__, 'KQ_NOTE_DELETE', 1)
+KQ_NOTE_WRITE = getattr(__select__, 'KQ_NOTE_WRITE', 2)
+KQ_NOTE_EXTEND = getattr(__select__, 'KQ_NOTE_EXTEND', 4)
+KQ_NOTE_ATTRIB = getattr(__select__, 'KQ_NOTE_ATTRIB', 8)
+KQ_NOTE_LINK = getattr(__select__, 'KQ_NOTE_LINK', 16)
+KQ_NOTE_RENAME = getattr(__select__, 'KQ_NOTE_RENAME', 32)
+KQ_NOTE_REVOKE = getattr(__select__, 'KQ_NOTE_REVOKE', 64)
+
+
+from kombu.syn import detect_environment
+
+from . import fileno
+from .compat import get_errno
+
+__all__ = ['poll']
+
+READ = POLL_READ = 0x001
+WRITE = POLL_WRITE = 0x004
+ERR = POLL_ERR = 0x008 | 0x010
+
+try:
+    SELECT_BAD_FD = set((errno.EBADF, errno.WSAENOTSOCK))
+except AttributeError:
+    SELECT_BAD_FD = set((errno.EBADF,))
+
+
+class Poller(object):
+
+    def poll(self, timeout):
+        try:
+            return self._poll(timeout)
+        except Exception as exc:
+            if get_errno(exc) != errno.EINTR:
+                raise
+
+
+class _epoll(Poller):
+
+    def __init__(self):
+        self._epoll = epoll()
+
+    def register(self, fd, events):
+        try:
+            self._epoll.register(fd, events)
+        except Exception as exc:
+            if get_errno(exc) != errno.EEXIST:
+                raise
+
+    def unregister(self, fd):
+        try:
+            self._epoll.unregister(fd)
+        except (socket.error, ValueError, KeyError, TypeError):
+            pass
+        except (IOError, OSError) as exc:
+            if get_errno(exc) != errno.ENOENT:
+                raise
+
+    def _poll(self, timeout):
+        return self._epoll.poll(timeout if timeout is not None else -1)
+
+    def close(self):
+        self._epoll.close()
+
+
+class _kqueue(Poller):
+    w_fflags = (KQ_NOTE_WRITE | KQ_NOTE_EXTEND |
+                KQ_NOTE_ATTRIB | KQ_NOTE_DELETE)
+
+    def __init__(self):
+        self._kqueue = kqueue()
+        self._active = {}
+        self.on_file_change = None
+        self._kcontrol = self._kqueue.control
+
+    def register(self, fd, events):
+        self._control(fd, events, KQ_EV_ADD)
+        self._active[fd] = events
+
+    def unregister(self, fd):
+        events = self._active.pop(fd, None)
+        if events:
+            try:
+                self._control(fd, events, KQ_EV_DELETE)
+            except socket.error:
+                pass
+
+    def watch_file(self, fd):
+        ev = kevent(fd,
+                    filter=KQ_FILTER_VNODE,
+                    flags=KQ_EV_ADD | KQ_EV_ENABLE | KQ_EV_CLEAR,
+                    fflags=self.w_fflags)
+        self._kcontrol([ev], 0)
+
+    def unwatch_file(self, fd):
+        ev = kevent(fd,
+                    filter=KQ_FILTER_VNODE,
+                    flags=KQ_EV_DELETE,
+                    fflags=self.w_fflags)
+        self._kcontrol([ev], 0)
+
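The `poll()` factory defined at the end of this module picks epoll, kqueue or plain select for the current platform; all of the pollers expose the same register/poll surface. A minimal sketch (Unix-oriented, since it uses `socketpair`)::

    import socket

    from kombu.utils.eventio import poll, READ

    poller = poll()
    a, b = socket.socketpair()
    poller.register(a, READ)
    b.send(b'x')               # make the other end readable
    print(poller.poll(1))      # e.g. [(a.fileno(), READ)]
    poller.close()

+    def _control(self, fd, events, flags):
+        if not events:
+            return
+        kevents = []
+        if 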
events & WRITE: + kevents.append(kevent(fd, + filter=KQ_FILTER_WRITE, + flags=flags)) + if not kevents or events & READ: + kevents.append( + kevent(fd, filter=KQ_FILTER_READ, flags=flags), + ) + control = self._kcontrol + for e in kevents: + try: + control([e], 0) + except ValueError: + pass + + def _poll(self, timeout): + kevents = self._kcontrol(None, 1000, timeout) + events, file_changes = {}, [] + for k in kevents: + fd = k.ident + if k.filter == KQ_FILTER_READ: + events[fd] = events.get(fd, 0) | READ + elif k.filter == KQ_FILTER_WRITE: + if k.flags & KQ_EV_EOF: + events[fd] = ERR + else: + events[fd] = events.get(fd, 0) | WRITE + elif k.filter == KQ_EV_ERROR: + events[fd] = events.get(fd, 0) | ERR + elif k.filter == KQ_FILTER_VNODE: + if k.fflags & KQ_NOTE_DELETE: + self.unregister(fd) + file_changes.append(k) + if file_changes: + self.on_file_change(file_changes) + return list(events.items()) + + def close(self): + self._kqueue.close() + + +class _select(Poller): + + def __init__(self): + self._all = (self._rfd, + self._wfd, + self._efd) = set(), set(), set() + + def register(self, fd, events): + fd = fileno(fd) + if events & ERR: + self._efd.add(fd) + if events & WRITE: + self._wfd.add(fd) + if events & READ: + self._rfd.add(fd) + + def _remove_bad(self): + for fd in self._rfd | self._wfd | self._efd: + try: + _selectf([fd], [], [], 0) + except (_selecterr, socket.error) as exc: + if get_errno(exc) in SELECT_BAD_FD: + self.unregister(fd) + + def unregister(self, fd): + try: + fd = fileno(fd) + except socket.error as exc: + # we don't know the previous fd of this object + # but it will be removed by the next poll iteration. + if get_errno(exc) in SELECT_BAD_FD: + return + raise + self._rfd.discard(fd) + self._wfd.discard(fd) + self._efd.discard(fd) + + def _poll(self, timeout): + try: + read, write, error = _selectf( + self._rfd, self._wfd, self._efd, timeout, + ) + except (_selecterr, socket.error) as exc: + if get_errno(exc) == errno.EINTR: + return + elif get_errno(exc) in SELECT_BAD_FD: + return self._remove_bad() + raise + + events = {} + for fd in read: + if not isinstance(fd, Integral): + fd = fd.fileno() + events[fd] = events.get(fd, 0) | READ + for fd in write: + if not isinstance(fd, Integral): + fd = fd.fileno() + events[fd] = events.get(fd, 0) | WRITE + for fd in error: + if not isinstance(fd, Integral): + fd = fd.fileno() + events[fd] = events.get(fd, 0) | ERR + return list(events.items()) + + def close(self): + self._rfd.clear() + self._wfd.clear() + self._efd.clear() + + +def _get_poller(): + if detect_environment() != 'default': + # greenlet + return _select + elif epoll: + # Py2.6+ Linux + return _epoll + elif kqueue: + # Py2.6+ on BSD / Darwin + return _select # was: _kqueue + else: + return _select + + +def poll(*args, **kwargs): + return _get_poller()(*args, **kwargs) diff --git a/kombu/utils/functional.py b/kombu/utils/functional.py new file mode 100644 index 0000000..746f42f --- /dev/null +++ b/kombu/utils/functional.py @@ -0,0 +1,82 @@ +from __future__ import absolute_import + +import sys + +from collections import Iterable, Mapping + +from kombu.five import string_t + +__all__ = ['lazy', 'maybe_evaluate', 'is_list', 'maybe_list'] + + +class lazy(object): + """Holds lazy evaluation. + + Evaluated when called or if the :meth:`evaluate` method is called. + The function is re-evaluated on every call. + + Overloaded operations that will evaluate the promise: + :meth:`__str__`, :meth:`__repr__`, :meth:`__cmp__`. 
+
+    """
+
+    def __init__(self, fun, *args, **kwargs):
+        self._fun = fun
+        self._args = args
+        self._kwargs = kwargs
+
+    def __call__(self):
+        return self.evaluate()
+
+    def evaluate(self):
+        return self._fun(*self._args, **self._kwargs)
+
+    def __str__(self):
+        return str(self())
+
+    def __repr__(self):
+        return repr(self())
+
+    def __eq__(self, rhs):
+        return self() == rhs
+
+    def __ne__(self, rhs):
+        return self() != rhs
+
+    def __deepcopy__(self, memo):
+        memo[id(self)] = self
+        return self
+
+    def __reduce__(self):
+        return (self.__class__, (self._fun, ), {'_args': self._args,
+                                                '_kwargs': self._kwargs})
+
+    if sys.version_info[0] < 3:
+
+        def __cmp__(self, rhs):
+            if isinstance(rhs, self.__class__):
+                return -cmp(rhs, self())
+            return cmp(self(), rhs)
+
+
+def maybe_evaluate(value):
+    """Evaluate the value if it is a :class:`lazy` instance."""
+    if isinstance(value, lazy):
+        return value.evaluate()
+    return value
+
+
+def is_list(l, scalars=(Mapping, string_t), iters=(Iterable, )):
+    """Return true if the object is iterable (but not
+    if object is a mapping or string)."""
+    return isinstance(l, iters) and not isinstance(l, scalars or ())
+
+
+def maybe_list(l, scalars=(Mapping, string_t)):
+    """Return list of one element if ``l`` is a scalar."""
+    return l if l is None or is_list(l, scalars) else [l]
+
+
+# Compat names (before kombu 3.0)
+promise = lazy
+maybe_promise = maybe_evaluate
diff --git a/kombu/utils/limits.py b/kombu/utils/limits.py
new file mode 100644
index 0000000..48eb536
--- /dev/null
+++ b/kombu/utils/limits.py
@@ -0,0 +1,68 @@
+"""
+kombu.utils.limits
+==================
+
+Token bucket implementation for rate limiting.
+
+"""
+from __future__ import absolute_import
+
+from kombu.five import monotonic
+
+__all__ = ['TokenBucket']
+
+
+class TokenBucket(object):
+    """Token Bucket Algorithm.
+
+    See http://en.wikipedia.org/wiki/Token_Bucket
+    Most of this code was stolen from an entry in the ASPN Python Cookbook:
+    http://code.activestate.com/recipes/511490/
+
+    .. admonition:: Thread safety
+
+        This implementation may not be thread safe.
+
+    """
+
+    #: The rate in tokens/second that the bucket will be refilled.
+    fill_rate = None
+
+    #: Maximum number of tokens in the bucket.
+    capacity = 1
+
+    #: Timestamp of the last time a token was taken out of the bucket.
+    timestamp = None
+
+    def __init__(self, fill_rate, capacity=1):
+        self.capacity = float(capacity)
+        self._tokens = capacity
+        self.fill_rate = float(fill_rate)
+        self.timestamp = monotonic()
+
+    def can_consume(self, tokens=1):
+        """Return :const:`True` if the number of tokens can be consumed
+        from the bucket."""
+        if tokens <= self._get_tokens():
+            self._tokens -= tokens
+            return True
+        return False
+
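A small illustration of the bucket above (rates are illustrative, and timing-sensitive output may vary slightly)::

    from kombu.utils.limits import TokenBucket

    bucket = TokenBucket(fill_rate=2, capacity=2)  # 2 tokens/sec, max 2
    print(bucket.can_consume(1))    # True
    print(bucket.can_consume(1))    # True
    print(bucket.can_consume(1))    # False -- bucket momentarily empty
    print(bucket.expected_time(1))  # ~0.5 seconds until the next token

+    def expected_time(self, tokens=1):
+        """Return the time (in seconds) when a new token is expected
+        to be available.
+
+        Note that this does not consume any tokens from the bucket.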
+ + """ + _tokens = self._get_tokens() + tokens = max(tokens, _tokens) + return (tokens - _tokens) / self.fill_rate + + def _get_tokens(self): + if self._tokens < self.capacity: + now = monotonic() + delta = self.fill_rate * (now - self.timestamp) + self._tokens = min(self.capacity, self._tokens + delta) + self.timestamp = now + return self._tokens diff --git a/kombu/utils/text.py b/kombu/utils/text.py new file mode 100644 index 0000000..066b28a --- /dev/null +++ b/kombu/utils/text.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +from difflib import SequenceMatcher + +from kombu import version_info_t +from kombu.five import string_t + + +def fmatch_iter(needle, haystack, min_ratio=0.6): + for key in haystack: + ratio = SequenceMatcher(None, needle, key).ratio() + if ratio >= min_ratio: + yield ratio, key + + +def fmatch_best(needle, haystack, min_ratio=0.6): + try: + return sorted( + fmatch_iter(needle, haystack, min_ratio), reverse=True, + )[0][1] + except IndexError: + pass + + +def version_string_as_tuple(s): + v = _unpack_version(*s.split('.')) + # X.Y.3a1 -> (X, Y, 3, 'a1') + if isinstance(v.micro, string_t): + v = version_info_t(v.major, v.minor, *_splitmicro(*v[2:])) + # X.Y.3a1-40 -> (X, Y, 3, 'a1', '40') + if not v.serial and v.releaselevel and '-' in v.releaselevel: + v = version_info_t(*list(v[0:3]) + v.releaselevel.split('-')) + return v + + +def _unpack_version(major, minor=0, micro=0, releaselevel='', serial=''): + return version_info_t(int(major), int(minor), micro, releaselevel, serial) + + +def _splitmicro(micro, releaselevel='', serial=''): + for index, char in enumerate(micro): + if not char.isdigit(): + break + else: + return int(micro or 0), releaselevel, serial + return int(micro[:index]), micro[index:], serial diff --git a/kombu/utils/url.py b/kombu/utils/url.py new file mode 100644 index 0000000..f93282d --- /dev/null +++ b/kombu/utils/url.py @@ -0,0 +1,64 @@ +from __future__ import absolute_import + +from functools import partial + +try: + from urllib.parse import parse_qsl, quote, unquote, urlparse +except ImportError: + from urllib import quote, unquote # noqa + from urlparse import urlparse, parse_qsl # noqa + +from . 
import kwdict +from kombu.five import string_t + +safequote = partial(quote, safe='') + + +def _parse_url(url): + scheme = urlparse(url).scheme + schemeless = url[len(scheme) + 3:] + # parse with HTTP URL semantics + parts = urlparse('http://' + schemeless) + path = parts.path or '' + path = path[1:] if path and path[0] == '/' else path + return (scheme, unquote(parts.hostname or '') or None, parts.port, + unquote(parts.username or '') or None, + unquote(parts.password or '') or None, + unquote(path or '') or None, + kwdict(dict(parse_qsl(parts.query)))) + + +def parse_url(url): + scheme, host, port, user, password, path, query = _parse_url(url) + return dict(transport=scheme, hostname=host, + port=port, userid=user, + password=password, virtual_host=path, **query) + + +def as_url(scheme, host=None, port=None, user=None, password=None, + path=None, query=None, sanitize=False, mask='**'): + parts = ['{0}://'.format(scheme)] + if user or password: + if user: + parts.append(safequote(user)) + if password: + if sanitize: + parts.extend([':', mask] if mask else [':']) + else: + parts.extend([':', safequote(password)]) + parts.append('@') + parts.append(safequote(host) if host else '') + if port: + parts.extend([':', port]) + parts.extend(['/', path]) + return ''.join(str(part) for part in parts if part) + + +def sanitize_url(url, mask='**'): + return as_url(*_parse_url(url), sanitize=True, mask=mask) + + +def maybe_sanitize_url(url, mask='**'): + if isinstance(url, string_t) and '://' in url: + return sanitize_url(url, mask) + return url diff --git a/requirements/default.txt b/requirements/default.txt new file mode 100644 index 0000000..520bd90 --- /dev/null +++ b/requirements/default.txt @@ -0,0 +1,2 @@ +anyjson>=0.3.3 +amqp>=1.4.5,<2.0 diff --git a/requirements/dev.txt b/requirements/dev.txt new file mode 100644 index 0000000..0ca0ffd --- /dev/null +++ b/requirements/dev.txt @@ -0,0 +1 @@ +https://github.com/celery/py-amqp/zipball/master diff --git a/requirements/docs.txt b/requirements/docs.txt new file mode 100644 index 0000000..dcf9838 --- /dev/null +++ b/requirements/docs.txt @@ -0,0 +1,3 @@ +Sphinx +sphinxcontrib-issuetracker>=0.9 +Django diff --git a/requirements/extras/beanstalk.txt b/requirements/extras/beanstalk.txt new file mode 100644 index 0000000..c62c81b --- /dev/null +++ b/requirements/extras/beanstalk.txt @@ -0,0 +1 @@ +beanstalkc diff --git a/requirements/extras/couchdb.txt b/requirements/extras/couchdb.txt new file mode 100644 index 0000000..3e100d4 --- /dev/null +++ b/requirements/extras/couchdb.txt @@ -0,0 +1 @@ +couchdb diff --git a/requirements/extras/kazoo.txt b/requirements/extras/kazoo.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/requirements/extras/kazoo.txt @@ -0,0 +1 @@ + diff --git a/requirements/extras/librabbitmq.txt b/requirements/extras/librabbitmq.txt new file mode 100644 index 0000000..866d11b --- /dev/null +++ b/requirements/extras/librabbitmq.txt @@ -0,0 +1 @@ +librabbitmq>=1.5.2 diff --git a/requirements/extras/mongodb.txt b/requirements/extras/mongodb.txt new file mode 100644 index 0000000..19e59fe --- /dev/null +++ b/requirements/extras/mongodb.txt @@ -0,0 +1 @@ +pymongo>=2.6.2 diff --git a/requirements/extras/msgpack.txt b/requirements/extras/msgpack.txt new file mode 100644 index 0000000..bf7cb78 --- /dev/null +++ b/requirements/extras/msgpack.txt @@ -0,0 +1 @@ +msgpack-python>=0.3.0 diff --git a/requirements/extras/pyro.txt b/requirements/extras/pyro.txt new file mode 100644 index 0000000..d19b0db --- /dev/null +++ 
b/requirements/extras/pyro.txt @@ -0,0 +1 @@ +pyro4 diff --git a/requirements/extras/redis.txt b/requirements/extras/redis.txt new file mode 100644 index 0000000..4a645b4 --- /dev/null +++ b/requirements/extras/redis.txt @@ -0,0 +1 @@ +redis>=2.8.0 diff --git a/requirements/extras/slmq.txt b/requirements/extras/slmq.txt new file mode 100644 index 0000000..2f06ed2 --- /dev/null +++ b/requirements/extras/slmq.txt @@ -0,0 +1 @@ +softlayer_messaging>=1.0.3 diff --git a/requirements/extras/sqlalchemy.txt b/requirements/extras/sqlalchemy.txt new file mode 100644 index 0000000..39fb2be --- /dev/null +++ b/requirements/extras/sqlalchemy.txt @@ -0,0 +1 @@ +sqlalchemy diff --git a/requirements/extras/sqs.txt b/requirements/extras/sqs.txt new file mode 100644 index 0000000..66b9583 --- /dev/null +++ b/requirements/extras/sqs.txt @@ -0,0 +1 @@ +boto>=2.13.3 diff --git a/requirements/extras/yaml.txt b/requirements/extras/yaml.txt new file mode 100644 index 0000000..17bf7fd --- /dev/null +++ b/requirements/extras/yaml.txt @@ -0,0 +1 @@ +PyYAML>=3.10 diff --git a/requirements/extras/zeromq.txt b/requirements/extras/zeromq.txt new file mode 100644 index 0000000..d34ee10 --- /dev/null +++ b/requirements/extras/zeromq.txt @@ -0,0 +1 @@ +pyzmq>=13.1.0 diff --git a/requirements/extras/zookeeper.txt b/requirements/extras/zookeeper.txt new file mode 100644 index 0000000..81893ea --- /dev/null +++ b/requirements/extras/zookeeper.txt @@ -0,0 +1 @@ +kazoo>=1.3.1 diff --git a/requirements/funtest.txt b/requirements/funtest.txt new file mode 100644 index 0000000..6ac859b --- /dev/null +++ b/requirements/funtest.txt @@ -0,0 +1,24 @@ +# redis transport +redis + +# MongoDB transport +pymongo + +# CouchDB transport +couchdb + +# Beanstalk transport +beanstalkc + +# Zookeeper transport +kazoo + +# SQLAlchemy transport +kombu-sqlalchemy + +# Django ORM transport +Django +django-kombu + +# SQS transport +boto diff --git a/requirements/pkgutils.txt b/requirements/pkgutils.txt new file mode 100644 index 0000000..5da811f --- /dev/null +++ b/requirements/pkgutils.txt @@ -0,0 +1,3 @@ +paver +flake8 +Sphinx diff --git a/requirements/py26.txt b/requirements/py26.txt new file mode 100644 index 0000000..1807d7c --- /dev/null +++ b/requirements/py26.txt @@ -0,0 +1,2 @@ +importlib +ordereddict diff --git a/requirements/test-ci.txt b/requirements/test-ci.txt new file mode 100644 index 0000000..1962309 --- /dev/null +++ b/requirements/test-ci.txt @@ -0,0 +1,6 @@ +boto +coverage>=3.0 +coveralls +redis +PyYAML +msgpack-python>0.2.0 # 0.2.0 dropped 2.5 support diff --git a/requirements/test-ci3.txt b/requirements/test-ci3.txt new file mode 100644 index 0000000..c5617bb --- /dev/null +++ b/requirements/test-ci3.txt @@ -0,0 +1,5 @@ +coverage>=3.0 +coveralls +redis +PyYAML +msgpack-python>0.2.0 # 0.2.0 dropped 2.5 support diff --git a/requirements/test.txt b/requirements/test.txt new file mode 100644 index 0000000..f9bd644 --- /dev/null +++ b/requirements/test.txt @@ -0,0 +1,3 @@ +nose +unittest2>=0.5.0 +mock diff --git a/requirements/test3.txt b/requirements/test3.txt new file mode 100644 index 0000000..fdf7c43 --- /dev/null +++ b/requirements/test3.txt @@ -0,0 +1,3 @@ +setuptools>=0.7 +nose +mock>=0.7.0 diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..0b5142b --- /dev/null +++ b/setup.cfg @@ -0,0 +1,34 @@ +[nosetests] +verbosity = 1 +detailed-errors = 1 +where = kombu/tests + +[build_sphinx] +source-dir = docs/ +build-dir = docs/.build +all_files = 1 + +[upload_sphinx] +upload-dir = docs/.build/html + +[bdist_rpm] 
+requires = anyjson >= 0.3.3 + amqp >= 1.4.5 + importlib + ordereddict + +[wheel] +universal = 1 + +[metadata] +requires-dist = + anyjson >= 0.3.3 + amqp >= 1.4.5,<2.0 + importlib; python_version == "2.6" + ordereddict; python_version == "2.6" + +[egg_info] +tag_build = +tag_date = 0 +tag_svn_revision = 0 + diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..f96f492 --- /dev/null +++ b/setup.py @@ -0,0 +1,177 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import os +import sys +import codecs + +extra = {} +PY3 = sys.version_info[0] == 3 + +if sys.version_info < (2, 6): + raise Exception('Kombu requires Python 2.6 or higher.') + +try: + from setuptools import setup +except ImportError: + from distutils.core import setup # noqa + +from distutils.command.install import INSTALL_SCHEMES + +# -- Parse meta +import re +re_meta = re.compile(r'__(\w+?)__\s*=\s*(.*)') +re_vers = re.compile(r'VERSION\s*=.*?\((.*?)\)') +re_doc = re.compile(r'^"""(.+?)"""') +rq = lambda s: s.strip("\"'") + + +def add_default(m): + attr_name, attr_value = m.groups() + return ((attr_name, rq(attr_value)), ) + + +def add_version(m): + v = list(map(rq, m.groups()[0].split(', '))) + return (('VERSION', '.'.join(v[0:3]) + ''.join(v[3:])), ) + + +def add_doc(m): + return (('doc', m.groups()[0]), ) + +pats = {re_meta: add_default, + re_vers: add_version, + re_doc: add_doc} +here = os.path.abspath(os.path.dirname(__file__)) +meta_fh = open(os.path.join(here, 'kombu/__init__.py')) +try: + meta = {} + for line in meta_fh: + if line.strip() == '# -eof meta-': + break + for pattern, handler in pats.items(): + m = pattern.match(line.strip()) + if m: + meta.update(handler(m)) +finally: + meta_fh.close() +# -- + +packages, data_files = [], [] +root_dir = os.path.dirname(__file__) +if root_dir != '': + os.chdir(root_dir) +src_dir = 'kombu' + + +def fullsplit(path, result=None): + if result is None: + result = [] + head, tail = os.path.split(path) + if head == '': + return [tail] + result + if head == path: + return result + return fullsplit(head, [tail] + result) + + +for scheme in list(INSTALL_SCHEMES.values()): + scheme['data'] = scheme['purelib'] + +for dirpath, dirnames, filenames in os.walk(src_dir): + # Ignore dirnames that start with '.' 
+    # (slice assignment prunes in place, so os.walk sees the change)
+    dirnames[:] = [d for d in dirnames if not d.startswith('.')]
+    for filename in filenames:
+        if filename.endswith('.py'):
+            packages.append('.'.join(fullsplit(dirpath)))
+        else:
+            data_files.append(
+                [dirpath, [os.path.join(dirpath, f) for f in filenames]],
+            )
+
+if os.path.exists('README.rst'):
+    long_description = codecs.open('README.rst', 'r', 'utf-8').read()
+else:
+    long_description = 'See http://pypi.python.org/pypi/kombu'
+
+# -*- Installation Requires -*-
+py_version = sys.version_info
+is_jython = sys.platform.startswith('java')
+is_pypy = hasattr(sys, 'pypy_version_info')
+
+
+def strip_comments(l):
+    return l.split('#', 1)[0].strip()
+
+
+def reqs(*f):
+    return [
+        r for r in (
+            strip_comments(l) for l in open(
+                os.path.join(os.getcwd(), 'requirements', *f)).readlines()
+        ) if r]
+
+install_requires = reqs('default.txt')
+if py_version[0:2] == (2, 6):
+    install_requires.extend(reqs('py26.txt'))
+
+# -*- Tests Requires -*-
+
+tests_require = reqs('test3.txt' if PY3 else 'test.txt')
+
+extras = lambda *p: reqs('extras', *p)
+extras_require = extra['extras_require'] = {
+    'msgpack': extras('msgpack.txt'),
+    'yaml': extras('yaml.txt'),
+    'redis': extras('redis.txt'),
+    'mongodb': extras('mongodb.txt'),
+    'sqs': extras('sqs.txt'),
+    'couchdb': extras('couchdb.txt'),
+    'beanstalk': extras('beanstalk.txt'),
+    'zookeeper': extras('zookeeper.txt'),
+    'zeromq': extras('zeromq.txt'),
+    'sqlalchemy': extras('sqlalchemy.txt'),
+    'librabbitmq': extras('librabbitmq.txt'),
+    'pyro': extras('pyro.txt'),
+    'slmq': extras('slmq.txt'),
+}
+
+extras_require[':python_version=="2.6"'] = reqs('py26.txt')
+
+setup(
+    name='kombu',
+    version=meta['VERSION'],
+    description=meta['doc'],
+    author=meta['author'],
+    author_email=meta['contact'],
+    url=meta['homepage'],
+    platforms=['any'],
+    packages=packages,
+    data_files=data_files,
+    zip_safe=False,
+    test_suite='nose.collector',
+    install_requires=install_requires,
+    tests_require=tests_require,
+    classifiers=[
+        'Development Status :: 5 - Production/Stable',
+        'License :: OSI Approved :: BSD License',
+        'Operating System :: OS Independent',
+        'Programming Language :: Python',
+        'Programming Language :: Python :: 3',
+        'Programming Language :: Python :: 3.4',
+        'Programming Language :: Python :: 3.3',
+        'Programming Language :: Python :: 2.7',
+        'Programming Language :: Python :: 2.6',
+        'Programming Language :: Python :: 2',
+        'Programming Language :: Python :: Implementation :: CPython',
+        'Programming Language :: Python :: Implementation :: PyPy',
+        'Programming Language :: Python :: Implementation :: Jython',
+        'Intended Audience :: Developers',
+        'Topic :: Communications',
+        'Topic :: System :: Distributed Computing',
+        'Topic :: System :: Networking',
+        'Topic :: Software Development :: Libraries :: Python Modules',
+    ],
+    long_description=long_description,
+    **extra)
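For reference, the requirements parsing above is easy to check in isolation; a short sketch (the inline list stands in for a requirements file on disk)::

    def strip_comments(l):
        return l.split('#', 1)[0].strip()

    lines = ['amqp>=1.4.5,<2.0  # pinned below 2.0', '', '# comment only']
    print([r for r in (strip_comments(l) for l in lines) if r])
    # -> ['amqp>=1.4.5,<2.0']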