diff --git a/.azure-pipelines/client.test.live.yml b/.azure-pipelines/client.test.live.yml index 3e42ba3a427b..c2232f93d628 100644 --- a/.azure-pipelines/client.test.live.yml +++ b/.azure-pipelines/client.test.live.yml @@ -31,6 +31,7 @@ jobs: - script: 'pytest -m liveTest $(ServicePackage)' displayName: 'Run test suite' env: + # Service Bus Variables SERVICE_BUS_HOSTNAME: $(python-sb-livetest-service-bus-hostname) SERVICE_BUS_SAS_POLICY: $(python-sb-livetest-service-sas-policy) SERVICE_BUS_SAS_KEY: $(python-sb-livetest-service-sas-key) @@ -38,3 +39,13 @@ jobs: SERVICE_BUS_CONNECTION_STR_RO: $(python-sb-livetest-service-connection-str-ro) SERVICE_BUS_CONNECTION_STR_WO: $(python-sb-livetest-service-connection-str-wo) SERVICE_BUS_CONNECTION_STR_ENTITY: $(python-sb-livetest-service-connection-entity) + # Event Hubs Variables + AZURE_STORAGE_ACCOUNT: $(python-eh-livetest-event-hub-storage-account) + AZURE_STORAGE_ACCESS_KEY: $(python-eh-livetest-event-hub-storage-access-key) + EVENT_HUB_HOSTNAME: $(python-eh-livetest-event-hub-hostname) + EVENT_HUB_NAME: $(python-eh-livetest-event-hub-name) + EVENT_HUB_SAS_POLICY: $(python-eh-livetest-event-hub-sas-policy) + EVENT_HUB_SAS_KEY: $(python-eh-livetest-event-hub-sas-key) + EVENT_HUB_NAMESPACE: $(python-eh-livetest-event-hub-namespace) + IOTHUB_CONNECTION_STR: $(python-eh-livetest-event-hub-iothub-connection-str) + IOTHUB_DEVICE: $(python-eh-livetest-event-hub-iothub-device) diff --git a/azure-eventhubs/HISTORY.rst b/azure-eventhubs/HISTORY.rst new file mode 100644 index 000000000000..e7fd3a2571f5 --- /dev/null +++ b/azure-eventhubs/HISTORY.rst @@ -0,0 +1,138 @@ +.. :changelog: + +Release History +=============== + +1.3.1 (2019-02-28) +------------------ + +**BugFixes** + +- Fixed bug where datetime offset filter was using a local timestamp rather than UTC. +- Fixed stackoverflow error in continuous connection reconnect attempts. 
+ + +1.3.0 (2019-01-29) +------------------ + +**Bugfixes** + +- Added support for auto reconnect on token expiration and other auth errors (issue #89). + +**Features** + +- Added ability to create ServiceBusClient from an existing SAS auth token, including + providing a function to auto-renew that token on expiry. +- Added support for storing a custom EPH context value in checkpoint (PR #84, thanks @konstantinmiller) + + +1.2.0 (2018-11-29) +------------------ + +- Support for Python 2.7 in azure.eventhub module (azure.eventprocessorhost will not support Python 2.7). +- Parse EventData.enqueued_time as a UTC timestamp (issue #72, thanks @vjrantal) + + +1.1.1 (2018-10-03) +------------------ + +- Fixed bug in Azure namespace package. + + +1.1.0 (2018-09-21) +------------------ + +- Changes to `AzureStorageCheckpointLeaseManager` parameters to support other connection options (issue #61): + + - The `storage_account_name`, `storage_account_key` and `lease_container_name` arguments are now optional keyword arguments. + - Added a `sas_token` argument that must be specified with `storage_account_name` in place of `storage_account_key`. + - Added an `endpoint_suffix` argument to support storage endpoints in National Clouds. + - Added a `connection_string` argument that, if specified, overrides all other endpoint arguments. + - The `lease_container_name` argument now defaults to `"eph-leases"` if not specified. + +- Fix for clients failing to start if run called multiple times (issue #64). +- Added convenience methods `body_as_str` and `body_as_json` to EventData object for easier processing of message data. + + +1.0.0 (2018-08-22) +------------------ + +- API stable. +- Renamed internal `_async` module to `async_ops` for docs generation. +- Added optional `auth_timeout` parameter to `EventHubClient` and `EventHubClientAsync` to configure how long to allow for token + negotiation to complete. Default is 60 seconds. 
+- Added optional `send_timeout` parameter to `EventHubClient.add_sender` and `EventHubClientAsync.add_async_sender` to determine the + timeout for Events to be successfully sent. Default value is 60 seconds. +- Reformatted logging for performance. + + +0.2.0 (2018-08-06) +------------------ + +- Stability improvements for EPH. +- Updated uAMQP version. +- Added new configuration options for Sender and Receiver; `keep_alive` and `auto_reconnect`. + These flags have been added to the following: + + - `EventHubClient.add_receiver` + - `EventHubClient.add_sender` + - `EventHubClientAsync.add_async_receiver` + - `EventHubClientAsync.add_async_sender` + - `EPHOptions.keey_alive_interval` + - `EPHOptions.auto_reconnect_on_error` + + +0.2.0rc2 (2018-07-29) +--------------------- + +- **Breaking change** `EventData.offset` will now return an object of type `~uamqp.common.Offset` rather than str. + The original string value can be retrieved from `~uamqp.common.Offset.value`. +- Each sender/receiver will now run in its own independent connection. +- Updated uAMQP dependency to 0.2.0 +- Fixed issue with IoTHub clients not being able to retrieve partition information. +- Added support for HTTP proxy settings to both EventHubClient and EPH. +- Added error handling policy to automatically reconnect on retryable error. +- Added keep-alive thread for maintaining an unused connection. + + +0.2.0rc1 (2018-07-06) +--------------------- + +- **Breaking change** Restructured library to support Python 3.7. Submodule `async` has been renamed and all classes from + this module can now be imported from azure.eventhub directly. +- **Breaking change** Removed optional `callback` argument from `Receiver.receive` and `AsyncReceiver.receive`. +- **Breaking change** `EventData.properties` has been renamed to `EventData.application_properties`. + This removes the potential for messages to be processed via callback for not yet returned + in the batch. 
+- Updated uAMQP dependency to v0.1.0 +- Added support for constructing IoTHub connections. +- Fixed memory leak in receive operations. +- Dropped Python 2.7 wheel support. + + +0.2.0b2 (2018-05-29) +-------------------- + +- Added `namespace_suffix` to EventHubConfig() to support national clouds. +- Added `device_id` attribute to EventData to support IoT Hub use cases. +- Added message header to workaround service bug for PartitionKey support. +- Updated uAMQP dependency to vRC1. + + +0.2.0b1 (2018-04-20) +-------------------- + +- Updated uAMQP to latest version. +- Further testing and minor bug fixes. + + +0.2.0a2 (2018-04-02) +-------------------- + +- Updated uAQMP dependency. + + +0.2.0a1 (unreleased) +-------------------- + +- Swapped out Proton dependency for uAMQP. \ No newline at end of file diff --git a/azure-eventhubs/LICENSE b/azure-eventhubs/LICENSE new file mode 100644 index 000000000000..4b1ad51b2f0e --- /dev/null +++ b/azure-eventhubs/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/azure-eventhubs/MANIFEST.in b/azure-eventhubs/MANIFEST.in new file mode 100644 index 000000000000..c9c2d50a3ec3 --- /dev/null +++ b/azure-eventhubs/MANIFEST.in @@ -0,0 +1,2 @@ +include *.rst +include azure/__init__.py \ No newline at end of file diff --git a/azure-eventhubs/README.rst b/azure-eventhubs/README.rst new file mode 100644 index 000000000000..d6fcf10b1373 --- /dev/null +++ b/azure-eventhubs/README.rst @@ -0,0 +1,229 @@ +Azure Event Hubs client library for Python +========================================== + +Azure Event Hubs is a big data streaming platform and event ingestion service. It can receive and process millions of events per second. + +Use the Event Hubs client library for Python to: + +- Publish events to the Event Hubs service through a sender. +- Read events from the Event Hubs service through a receiver. + +On Python 3.5 and above, it also includes: + +- An async sender and receiver that supports async/await methods. +- An Event Processor Host module that manages the distribution of partition readers. + +`Source code `__ | `Package (PyPi) `__ | `API reference documentation `__ | `Product documentation `__ + +Getting started +=============== + +Install the package +------------------- + +Install the Azure Event Hubs client library for Python with pip: + +.. code:: shell + + $ pip install azure-eventhub + +Prerequisites ++++++++++++++ + +- An Azure subscription. +- Python 3.4 or later. +- An existing Event Hubs namespace and event hub. You can create these entities by following the instructions in `this article `__. + +Authenticate the client +----------------------- + +Interaction with Event Hubs starts with an instance of the EventHubClient class. 
You need the host name, sas policy name, sas key and event hub name to instantiate the client object. + +Get credentials ++++++++++++++++ + +You can find credential information in `Azure Portal `__. + +Create client ++++++++++++++ + +There are several ways to instantiate the EventHubClient object and the following code snippets demonstrate one way: + +.. code:: python + + import os + from azure.eventhub import EventHubClient + + connection_str = "Endpoint=sb://{}/;SharedAccessKeyName={};SharedAccessKey={};EntityPath={}".format( + os.environ['EVENT_HUB_HOSTNAME'], + os.environ['EVENT_HUB_SAS_POLICY'], + os.environ['EVENT_HUB_SAS_KEY'], + os.environ['EVENT_HUB_NAME']) + client = EventHubClient.from_connection_string(connection_str) + +Key concepts +============ + +- **Namespace:** An Event Hubs namespace provides a unique scoping container, referenced by its fully qualified domain name, in which you create one or more event hubs or Kafka topics. + +- **Event publishers**: Any entity that sends data to an event hub is an event producer, or event publisher. Event publishers can publish events using HTTPS or AMQP 1.0 or Kafka 1.0 and later. Event publishers use a Shared Access Signature (SAS) token to identify themselves to an event hub, and can have a unique identity, or use a common SAS token. + +- **Event consumers**: Any entity that reads event data from an event hub is an event consumer. All Event Hubs consumers connect via the AMQP 1.0 session and events are delivered through the session as they become available. The client does not need to poll for data availability. + +- **SAS tokens**: Event Hubs uses Shared Access Signatures, which are available at the namespace and event hub level. A SAS token is generated from a SAS key and is an SHA hash of a URL, encoded in a specific format. Using the name of the key (policy) and the token, Event Hubs can regenerate the hash and thus authenticate the sender. 
+ +For more information about these concepts, see `Features and terminology in Azure Event Hubs `__. + +Examples +======== + +The following sections provide several code snippets covering some of the most common Event Hubs tasks, including: + +- `Send event data`_ +- `Receive event data`_ +- `Async send event data`_ +- `Async receive event data`_ + +.. _`Send event data`: + +Send event data +--------------- + +Sends an event data and blocks until acknowledgement is received or operation times out. + +.. code:: python + + client = EventHubClient.from_connection_string(connection_str) + sender = client.add_sender(partition="0") + try: + client.run() + event_data = EventData(b"A single event") + sender.send(event_data) + except: + raise + finally: + client.stop() + +.. _`Receive event data`: + +Receive event data +------------------ + +Receive events from the EventHub. + +.. code:: python + + client = EventHubClient.from_connection_string(connection_str) + receiver = client.add_receiver(consumer_group="$default", partition="0", offset=Offset('@latest')) + try: + client.run() + logger = logging.getLogger("azure.eventhub") + received = receiver.receive(timeout=5, max_batch_size=100) + for event_data in received: + logger.info("Message received:{}".format(event_data.body_as_str())) + except: + raise + finally: + client.stop() + +.. _`Async send event data`: + +Async send event data +--------------------- + +Sends an event data and asynchronously waits until acknowledgement is received or operation times out. + +.. code:: python + + client = EventHubClientAsync.from_connection_string(connection_str) + sender = client.add_async_sender(partition="0") + try: + await client.run_async() + event_data = EventData(b"A single event") + await sender.send(event_data) + except: + raise + finally: + await client.stop_async() + +.. _`Async receive event data`: + +Async receive event data +------------------------ + +Receive events asynchronously from the EventHub. + +.. 
code:: python + + client = EventHubClientAsync.from_connection_string(connection_str) + receiver = client.add_async_receiver(consumer_group="$default", partition="0", offset=Offset('@latest')) + try: + await client.run_async() + logger = logging.getLogger("azure.eventhub") + received = await receiver.receive(timeout=5) + for event_data in received: + logger.info("Message received:{}".format(event_data.body_as_str())) + except: + raise + finally: + await client.stop_async() + +Troubleshooting +=============== + +General +------- + +The Event Hubs APIs generate exceptions that can fall into the following categories, along with the associated action you can take to try to fix them. + +- **User coding error:** System.ArgumentException, System.InvalidOperationException, System.OperationCanceledException, System.Runtime.Serialization.SerializationException. General action: try to fix the code before proceeding. +- **Setup/configuration error:** Microsoft.ServiceBus.Messaging.MessagingEntityNotFoundException, Microsoft.Azure.EventHubs.MessagingEntityNotFoundException, System.UnauthorizedAccessException. General action: review your configuration and change if necessary. +- **Transient exceptions:** Microsoft.ServiceBus.Messaging.MessagingException, Microsoft.ServiceBus.Messaging.ServerBusyException, Microsoft.Azure.EventHubs.ServerBusyException, Microsoft.ServiceBus.Messaging.MessagingCommunicationException. General action: retry the operation or notify users. +- **Other exceptions:** System.Transactions.TransactionException, System.TimeoutException, Microsoft.ServiceBus.Messaging.MessageLockLostException, Microsoft.ServiceBus.Messaging.SessionLockLostException. General action: specific to the exception type; refer to the table in `Event Hubs messaging exceptions <https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-messaging-exceptions>`__. + +For more detailed information about exceptions and how to deal with them, see `Event Hubs messaging exceptions <https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-messaging-exceptions>`__. 
+ +Next steps +========== + +Examples +-------- + +- ./examples/send.py - use sender to publish events +- ./examples/recv.py - use receiver to read events +- ./examples/send_async.py - async/await support of a sender +- ./examples/recv_async.py - async/await support of a receiver +- ./examples/eph.py - event processor host + +Documentation +------------- +Reference documentation is available at `docs.microsoft.com/python/api/azure-eventhub `__. + +Logging +------- + +- enable 'azure.eventhub' logger to collect traces from the library +- enable 'uamqp' logger to collect traces from the underlying uAMQP library +- enable AMQP frame level trace by setting `debug=True` when creating the Client + +Provide Feedback +---------------- + +If you encounter any bugs or have suggestions, please file an issue in the +`Issues `__ +section of the project. + +Contributing +============ + +This project welcomes contributions and suggestions. Most contributions require you to agree to a +Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us +the rights to use your contribution. For details, visit `https://cla.microsoft.com `__. + +When you submit a pull request, a CLA-bot will automatically determine whether you need to provide +a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions +provided by the bot. You will only need to do this once across all repos using our CLA. + +This project has adopted the `Microsoft Open Source Code of Conduct `__. +For more information see the `Code of Conduct FAQ `__ or +contact `opencode@microsoft.com `__ with any additional questions or comments. 
\ No newline at end of file diff --git a/azure-eventhubs/azure/__init__.py b/azure-eventhubs/azure/__init__.py new file mode 100644 index 000000000000..899906dc0ed1 --- /dev/null +++ b/azure-eventhubs/azure/__init__.py @@ -0,0 +1,2 @@ + +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/azure-eventhubs/azure/eventhub/__init__.py b/azure-eventhubs/azure/eventhub/__init__.py new file mode 100644 index 000000000000..7067761d5ef6 --- /dev/null +++ b/azure-eventhubs/azure/eventhub/__init__.py @@ -0,0 +1,19 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +__version__ = "1.3.1" + +from azure.eventhub.common import EventData, EventHubError, Offset +from azure.eventhub.client import EventHubClient +from azure.eventhub.sender import Sender +from azure.eventhub.receiver import Receiver + +try: + from azure.eventhub.async_ops import ( + EventHubClientAsync, + AsyncSender, + AsyncReceiver) +except (ImportError, SyntaxError): + pass # Python 3 async features not supported diff --git a/azure-eventhubs/azure/eventhub/async_ops/__init__.py b/azure-eventhubs/azure/eventhub/async_ops/__init__.py new file mode 100644 index 000000000000..784746d04bb8 --- /dev/null +++ b/azure-eventhubs/azure/eventhub/async_ops/__init__.py @@ -0,0 +1,325 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- + +import logging +import asyncio +import time +import datetime +from urllib.parse import urlparse, unquote_plus, urlencode, quote_plus + +from uamqp import authentication, constants, types, errors +from uamqp import ( + Message, + ConnectionAsync, + AMQPClientAsync, + SendClientAsync, + ReceiveClientAsync) + +from azure.eventhub.common import parse_sas_token +from azure.eventhub import ( + Sender, + Receiver, + EventHubClient, + EventData, + EventHubError) + +from .sender_async import AsyncSender +from .receiver_async import AsyncReceiver + + +log = logging.getLogger(__name__) + + +class EventHubClientAsync(EventHubClient): + """ + The EventHubClient class defines a high level interface for asynchronously + sending events to and receiving events from the Azure Event Hubs service. + + Example: + .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py + :start-after: [START create_eventhub_client_async] + :end-before: [END create_eventhub_client_async] + :language: python + :dedent: 4 + :caption: Create a new instance of the Event Hub client async. + + """ + + def _create_auth(self, username=None, password=None): + """ + Create an ~uamqp.authentication.cbs_auth_async.SASTokenAuthAsync instance to authenticate + the session. + + :param username: The name of the shared access policy. + :type username: str + :param password: The shared access key. 
+ :type password: str + """ + if self.sas_token: + token = self.sas_token() if callable(self.sas_token) else self.sas_token + try: + expiry = int(parse_sas_token(token)['se']) + except (KeyError, TypeError, IndexError): + raise ValueError("Supplied SAS token has no valid expiry value.") + return authentication.SASTokenAsync( + self.auth_uri, self.auth_uri, token, + expires_at=expiry, + timeout=self.auth_timeout, + http_proxy=self.http_proxy) + + username = username or self._auth_config['username'] + password = password or self._auth_config['password'] + if "@sas.root" in username: + return authentication.SASLPlain( + self.address.hostname, username, password, http_proxy=self.http_proxy) + return authentication.SASTokenAsync.from_shared_access_key( + self.auth_uri, username, password, timeout=self.auth_timeout, http_proxy=self.http_proxy) + + async def _close_clients_async(self): + """ + Close all open AsyncSender/AsyncReceiver clients. + """ + await asyncio.gather(*[c.close_async() for c in self.clients]) + + async def _wait_for_client(self, client): + try: + while client.get_handler_state().value == 2: + await client._handler._connection.work_async() # pylint: disable=protected-access + except Exception as exp: # pylint: disable=broad-except + await client.close_async(exception=exp) + + async def _start_client_async(self, client): + try: + if not client.running: + await client.open_async() + except Exception as exp: # pylint: disable=broad-except + log.info("Encountered error while starting handler: %r", exp) + await client.close_async(exception=exp) + log.info("Finished closing failed handler") + + async def _handle_redirect(self, redirects): + if len(redirects) != len(self.clients): + not_redirected = [c for c in self.clients if not c.redirected] + _, timeout = await asyncio.wait([self._wait_for_client(c) for c in not_redirected], timeout=5) + if timeout: + raise EventHubError("Some clients are attempting to redirect the connection.") + redirects = [c.redirected 
for c in self.clients if c.redirected] + if not all(r.hostname == redirects[0].hostname for r in redirects): + raise EventHubError("Multiple clients attempting to redirect to different hosts.") + self._process_redirect_uri(redirects[0]) + await asyncio.gather(*[c.open_async() for c in self.clients]) + + async def run_async(self): + """ + Run the EventHubClient asynchronously. + Opens the connection and starts running all AsyncSender/AsyncReceiver clients. + Returns a list of the start up results. For a succcesful client start the + result will be `None`, otherwise the exception raised. + If all clients failed to start, then run will fail, shut down the connection + and raise an exception. + If at least one client starts up successfully the run command will succeed. + + :rtype: list[~azure.eventhub.common.EventHubError] + + Example: + .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py + :start-after: [START eventhub_client_run_async] + :end-before: [END eventhub_client_run_async] + :language: python + :dedent: 4 + :caption: Run the EventHubClient asynchronously. + + """ + log.info("%r: Starting %r clients", self.container_id, len(self.clients)) + tasks = [self._start_client_async(c) for c in self.clients] + try: + await asyncio.gather(*tasks) + redirects = [c.redirected for c in self.clients if c.redirected] + failed = [c.error for c in self.clients if c.error] + if failed and len(failed) == len(self.clients): + log.warning("%r: All clients failed to start.", self.container_id) + raise failed[0] + if failed: + log.warning("%r: %r clients failed to start.", self.container_id, len(failed)) + elif redirects: + await self._handle_redirect(redirects) + except EventHubError: + await self.stop_async() + raise + except Exception as exp: + await self.stop_async() + raise EventHubError(str(exp)) + return failed + + async def stop_async(self): + """ + Stop the EventHubClient and all its Sender/Receiver clients. + + Example: + .. 
literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py + :start-after: [START eventhub_client_async_stop] + :end-before: [END eventhub_client_async_stop] + :language: python + :dedent: 4 + :caption: Stop the EventHubClient and all its Sender/Receiver clients. + + """ + log.info("%r: Stopping %r clients", self.container_id, len(self.clients)) + self.stopped = True + await self._close_clients_async() + + async def get_eventhub_info_async(self): + """ + Get details on the specified EventHub async. + + :rtype: dict + """ + alt_creds = { + "username": self._auth_config.get("iot_username"), + "password":self._auth_config.get("iot_password")} + try: + mgmt_auth = self._create_auth(**alt_creds) + mgmt_client = AMQPClientAsync(self.mgmt_target, auth=mgmt_auth, debug=self.debug) + await mgmt_client.open_async() + mgmt_msg = Message(application_properties={'name': self.eh_name}) + response = await mgmt_client.mgmt_request_async( + mgmt_msg, + constants.READ_OPERATION, + op_type=b'com.microsoft:eventhub', + status_code_field=b'status-code', + description_fields=b'status-description') + eh_info = response.get_data() + output = {} + if eh_info: + output['name'] = eh_info[b'name'].decode('utf-8') + output['type'] = eh_info[b'type'].decode('utf-8') + output['created_at'] = datetime.datetime.fromtimestamp(float(eh_info[b'created_at'])/1000) + output['partition_count'] = eh_info[b'partition_count'] + output['partition_ids'] = [p.decode('utf-8') for p in eh_info[b'partition_ids']] + return output + finally: + await mgmt_client.close_async() + + def add_async_receiver( + self, consumer_group, partition, offset=None, prefetch=300, + operation=None, keep_alive=30, auto_reconnect=True, loop=None): + """ + Add an async receiver to the client for a particular consumer group and partition. + + :param consumer_group: The name of the consumer group. + :type consumer_group: str + :param partition: The ID of the partition. 
+ :type partition: str + :param offset: The offset from which to start receiving. + :type offset: ~azure.eventhub.common.Offset + :param prefetch: The message prefetch count of the receiver. Default is 300. + :type prefetch: int + :operation: An optional operation to be appended to the hostname in the source URL. + The value must start with `/` character. + :type operation: str + :rtype: ~azure.eventhub.async_ops.receiver_async.ReceiverAsync + + Example: + .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py + :start-after: [START create_eventhub_client_async_receiver] + :end-before: [END create_eventhub_client_async_receiver] + :language: python + :dedent: 4 + :caption: Add an async receiver to the client for a particular consumer group and partition. + + """ + path = self.address.path + operation if operation else self.address.path + source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( + self.address.hostname, path, consumer_group, partition) + handler = AsyncReceiver( + self, source_url, offset=offset, prefetch=prefetch, + keep_alive=keep_alive, auto_reconnect=auto_reconnect, loop=loop) + self.clients.append(handler) + return handler + + def add_async_epoch_receiver( + self, consumer_group, partition, epoch, prefetch=300, + operation=None, keep_alive=30, auto_reconnect=True, loop=None): + """ + Add an async receiver to the client with an epoch value. Only a single epoch receiver + can connect to a partition at any given time - additional epoch receivers must have + a higher epoch value or they will be rejected. If a 2nd epoch receiver has + connected, the first will be closed. + + :param consumer_group: The name of the consumer group. + :type consumer_group: str + :param partition: The ID of the partition. + :type partition: str + :param epoch: The epoch value for the receiver. + :type epoch: int + :param prefetch: The message prefetch count of the receiver. Default is 300. 
+ :type prefetch: int + :operation: An optional operation to be appended to the hostname in the source URL. + The value must start with `/` character. + :type operation: str + :rtype: ~azure.eventhub.async_ops.receiver_async.ReceiverAsync + + Example: + .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py + :start-after: [START create_eventhub_client_async_epoch_receiver] + :end-before: [END create_eventhub_client_async_epoch_receiver] + :language: python + :dedent: 4 + :caption: Add an async receiver to the client with an epoch value. + + """ + path = self.address.path + operation if operation else self.address.path + source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( + self.address.hostname, path, consumer_group, partition) + handler = AsyncReceiver( + self, source_url, prefetch=prefetch, epoch=epoch, + keep_alive=keep_alive, auto_reconnect=auto_reconnect, loop=loop) + self.clients.append(handler) + return handler + + def add_async_sender( + self, partition=None, operation=None, send_timeout=60, + keep_alive=30, auto_reconnect=True, loop=None): + """ + Add an async sender to the client to send ~azure.eventhub.common.EventData object + to an EventHub. + + :param partition: Optionally specify a particular partition to send to. + If omitted, the events will be distributed to available partitions via + round-robin. + :type partition: str + :operation: An optional operation to be appended to the hostname in the target URL. + The value must start with `/` character. + :type operation: str + :param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is + queued. Default value is 60 seconds. If set to 0, there will be no timeout. + :type send_timeout: int + :param keep_alive: The time interval in seconds between pinging the connection to keep it alive during + periods of inactivity. The default value is 30 seconds. If set to `None`, the connection will not + be pinged. 
+ :type keep_alive: int + :param auto_reconnect: Whether to automatically reconnect the sender if a retryable error occurs. + Default value is `True`. + :type auto_reconnect: bool + :rtype: ~azure.eventhub.async_ops.sender_async.SenderAsync + + Example: + .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py + :start-after: [START create_eventhub_client_async_sender] + :end-before: [END create_eventhub_client_async_sender] + :language: python + :dedent: 4 + :caption: Add an async sender to the client to + send ~azure.eventhub.common.EventData object to an EventHub. + + """ + target = "amqps://{}{}".format(self.address.hostname, self.address.path) + if operation: + target = target + operation + handler = AsyncSender( + self, target, partition=partition, send_timeout=send_timeout, keep_alive=keep_alive, + auto_reconnect=auto_reconnect, loop=loop) + self.clients.append(handler) + return handler diff --git a/azure-eventhubs/azure/eventhub/async_ops/receiver_async.py b/azure-eventhubs/azure/eventhub/async_ops/receiver_async.py new file mode 100644 index 000000000000..3dc17b57a689 --- /dev/null +++ b/azure-eventhubs/azure/eventhub/async_ops/receiver_async.py @@ -0,0 +1,315 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +import asyncio +import uuid +import logging + +from uamqp import errors, types +from uamqp import ReceiveClientAsync, Source + +from azure.eventhub import EventHubError, EventData +from azure.eventhub.receiver import Receiver +from azure.eventhub.common import _error_handler + +log = logging.getLogger(__name__) + + +class AsyncReceiver(Receiver): + """ + Implements the async API of a Receiver. + + Example: + .. 
literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py + :start-after: [START create_eventhub_client_async_receiver_instance] + :end-before: [END create_eventhub_client_async_receiver_instance] + :language: python + :dedent: 4 + :caption: Create a new instance of the Async Receiver. + + """ + + def __init__( # pylint: disable=super-init-not-called + self, client, source, offset=None, prefetch=300, epoch=None, + keep_alive=None, auto_reconnect=True, loop=None): + """ + Instantiate an async receiver. + + :param client: The parent EventHubClientAsync. + :type client: ~azure.eventhub.async_ops.EventHubClientAsync + :param source: The source EventHub from which to receive events. + :type source: ~uamqp.address.Source + :param prefetch: The number of events to prefetch from the service + for processing. Default is 300. + :type prefetch: int + :param epoch: An optional epoch value. + :type epoch: int + :param loop: An event loop. + """ + self.loop = loop or asyncio.get_event_loop() + self.running = False + self.client = client + self.source = source + self.offset = offset + self.prefetch = prefetch + self.epoch = epoch + self.keep_alive = keep_alive + self.auto_reconnect = auto_reconnect + self.retry_policy = errors.ErrorPolicy(max_retries=3, on_error=_error_handler) + self.reconnect_backoff = 1 + self.redirected = None + self.error = None + self.properties = None + partition = self.source.split('/')[-1] + self.name = "EHReceiver-{}-partition{}".format(uuid.uuid4(), partition) + source = Source(self.source) + if self.offset is not None: + source.set_filter(self.offset.selector()) + if epoch: + self.properties = {types.AMQPSymbol(self._epoch): types.AMQPLong(int(epoch))} + self._handler = ReceiveClientAsync( + source, + auth=self.client.get_auth(), + debug=self.client.debug, + prefetch=self.prefetch, + link_properties=self.properties, + timeout=self.timeout, + error_policy=self.retry_policy, + keep_alive_interval=self.keep_alive, + 
client_name=self.name, + properties=self.client.create_properties(), + loop=self.loop) + + async def open_async(self): + """ + Open the Receiver using the supplied conneciton. + If the handler has previously been redirected, the redirect + context will be used to create a new handler before opening it. + + :param connection: The underlying client shared connection. + :type: connection: ~uamqp.async_ops.connection_async.ConnectionAsync + + Example: + .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py + :start-after: [START eventhub_client_async_receiver_open] + :end-before: [END eventhub_client_async_receiver_open] + :language: python + :dedent: 4 + :caption: Open the Receiver using the supplied conneciton. + + """ + # pylint: disable=protected-access + self.running = True + if self.redirected: + self.source = self.redirected.address + source = Source(self.source) + if self.offset is not None: + source.set_filter(self.offset.selector()) + alt_creds = { + "username": self.client._auth_config.get("iot_username"), + "password":self.client._auth_config.get("iot_password")} + self._handler = ReceiveClientAsync( + source, + auth=self.client.get_auth(**alt_creds), + debug=self.client.debug, + prefetch=self.prefetch, + link_properties=self.properties, + timeout=self.timeout, + error_policy=self.retry_policy, + keep_alive_interval=self.keep_alive, + client_name=self.name, + properties=self.client.create_properties(), + loop=self.loop) + await self._handler.open_async() + while not await self._handler.client_ready_async(): + await asyncio.sleep(0.05) + + async def _reconnect_async(self): # pylint: disable=too-many-statements + # pylint: disable=protected-access + alt_creds = { + "username": self.client._auth_config.get("iot_username"), + "password":self.client._auth_config.get("iot_password")} + await self._handler.close_async() + source = Source(self.source) + if self.offset is not None: + source.set_filter(self.offset.selector()) + self._handler 
= ReceiveClientAsync( + source, + auth=self.client.get_auth(**alt_creds), + debug=self.client.debug, + prefetch=self.prefetch, + link_properties=self.properties, + timeout=self.timeout, + error_policy=self.retry_policy, + keep_alive_interval=self.keep_alive, + client_name=self.name, + properties=self.client.create_properties(), + loop=self.loop) + try: + await self._handler.open_async() + while not await self._handler.client_ready_async(): + await asyncio.sleep(0.05) + return True + except errors.TokenExpired as shutdown: + log.info("AsyncReceiver disconnected due to token expiry. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close_async(exception=error) + raise error + except (errors.LinkDetach, errors.ConnectionClose) as shutdown: + if shutdown.action.retry and self.auto_reconnect: + log.info("AsyncReceiver detached. Attempting reconnect.") + return False + log.info("AsyncReceiver detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close_async(exception=error) + raise error + except errors.MessageHandlerError as shutdown: + if self.auto_reconnect: + log.info("AsyncReceiver detached. Attempting reconnect.") + return False + log.info("AsyncReceiver detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close_async(exception=error) + raise error + except errors.AMQPConnectionError as shutdown: + if str(shutdown).startswith("Unable to open authentication session") and self.auto_reconnect: + log.info("AsyncReceiver couldn't authenticate. Attempting reconnect.") + return False + log.info("AsyncReceiver connection error (%r). Shutting down.", shutdown) + error = EventHubError(str(shutdown)) + await self.close_async(exception=error) + raise error + except Exception as e: + log.info("Unexpected error occurred (%r). 
Shutting down.", e)
+            error = EventHubError("Receiver reconnect failed: {}".format(e))
+            await self.close_async(exception=error)
+            raise error
+
+    async def reconnect_async(self):
+        """If the Receiver was disconnected from the service with
+        a retryable error - attempt to reconnect."""
+        while not await self._reconnect_async():
+            await asyncio.sleep(self.reconnect_backoff)
+
+    async def has_started(self):
+        """
+        Whether the handler has completed all start up processes such as
+        establishing the connection, session, link and authentication, and
+        is now ready to process messages.
+        **This function is now deprecated and will be removed in v2.0+.**
+
+        :rtype: bool
+        """
+        # pylint: disable=protected-access
+        timeout = False
+        auth_in_progress = False
+        if self._handler._connection.cbs:
+            timeout, auth_in_progress = await self._handler._auth.handle_token_async()
+        if timeout:
+            raise EventHubError("Authorization timeout.")
+        if auth_in_progress:
+            return False
+        if not await self._handler._client_ready_async():
+            return False
+        return True
+
+    async def close_async(self, exception=None):
+        """
+        Close down the handler. If the handler has already closed,
+        this will be a no op. An optional exception can be passed in to
+        indicate that the handler was shutdown due to error.
+
+        :param exception: An optional exception if the handler is closing
+            due to an error.
+        :type exception: Exception
+
+        Example:
+            .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py
+                :start-after: [START eventhub_client_async_receiver_close]
+                :end-before: [END eventhub_client_async_receiver_close]
+                :language: python
+                :dedent: 4
+                :caption: Close down the handler.
+
+        """
+        self.running = False
+        if self.error:
+            return
+        if isinstance(exception, errors.LinkRedirect):
+            self.redirected = exception
+        elif isinstance(exception, EventHubError):
+            self.error = exception
+        elif isinstance(exception, (errors.LinkDetach, errors.ConnectionClose)):
+            self.error = EventHubError(str(exception), exception)
+        elif exception:
+            self.error = EventHubError(str(exception))
+        else:
+            self.error = EventHubError("This receive handler is now closed.")
+        await self._handler.close_async()
+
+    async def receive(self, max_batch_size=None, timeout=None):
+        """
+        Receive events asynchronously from the EventHub.
+
+        :param max_batch_size: Receive a batch of events. Batch size will
+            be up to the maximum specified, but will return as soon as service
+            returns no new events. If combined with a timeout and no events are
+            retrieved before the timeout, the result will be empty. If no batch
+            size is supplied, the prefetch size will be the maximum.
+        :type max_batch_size: int
+        :rtype: list[~azure.eventhub.common.EventData]
+
+        Example:
+            .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py
+                :start-after: [START eventhub_client_async_receive]
+                :end-before: [END eventhub_client_async_receive]
+                :language: python
+                :dedent: 4
+                :caption: Receives a batch of events asynchronously,
+                 returning when events arrive or the operation times out.
+ + """ + if self.error: + raise self.error + if not self.running: + raise ValueError("Unable to receive until client has been started.") + data_batch = [] + try: + timeout_ms = 1000 * timeout if timeout else 0 + message_batch = await self._handler.receive_message_batch_async( + max_batch_size=max_batch_size, + timeout=timeout_ms) + for message in message_batch: + event_data = EventData(message=message) + self.offset = event_data.offset + data_batch.append(event_data) + return data_batch + except (errors.TokenExpired, errors.AuthenticationException): + log.info("AsyncReceiver disconnected due to token error. Attempting reconnect.") + await self.reconnect_async() + return data_batch + except (errors.LinkDetach, errors.ConnectionClose) as shutdown: + if shutdown.action.retry and self.auto_reconnect: + log.info("AsyncReceiver detached. Attempting reconnect.") + await self.reconnect_async() + return data_batch + log.info("AsyncReceiver detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close_async(exception=error) + raise error + except errors.MessageHandlerError as shutdown: + if self.auto_reconnect: + log.info("AsyncReceiver detached. Attempting reconnect.") + await self.reconnect_async() + return data_batch + log.info("AsyncReceiver detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close_async(exception=error) + raise error + except Exception as e: + log.info("Unexpected error occurred (%r). 
Shutting down.", e) + error = EventHubError("Receive failed: {}".format(e)) + await self.close_async(exception=error) + raise error diff --git a/azure-eventhubs/azure/eventhub/async_ops/sender_async.py b/azure-eventhubs/azure/eventhub/async_ops/sender_async.py new file mode 100644 index 000000000000..e2fb1cbb7022 --- /dev/null +++ b/azure-eventhubs/azure/eventhub/async_ops/sender_async.py @@ -0,0 +1,330 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +import uuid +import asyncio +import logging + +from uamqp import constants, errors +from uamqp import SendClientAsync + +from azure.eventhub import EventHubError +from azure.eventhub.sender import Sender +from azure.eventhub.common import _error_handler + +log = logging.getLogger(__name__) + + +class AsyncSender(Sender): + """ + Implements the async API of a Sender. + + Example: + .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py + :start-after: [START create_eventhub_client_async_sender_instance] + :end-before: [END create_eventhub_client_async_sender_instance] + :language: python + :dedent: 4 + :caption: Create a new instance of the Async Sender. + + """ + + def __init__( # pylint: disable=super-init-not-called + self, client, target, partition=None, send_timeout=60, + keep_alive=None, auto_reconnect=True, loop=None): + """ + Instantiate an EventHub event SenderAsync handler. + + :param client: The parent EventHubClientAsync. + :type client: ~azure.eventhub.async_ops.EventHubClientAsync + :param target: The URI of the EventHub to send to. + :type target: str + :param partition: The specific partition ID to send to. 
Default is `None`, in which case the service + will assign to all partitions using round-robin. + :type partition: str + :param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is + queued. Default value is 60 seconds. If set to 0, there will be no timeout. + :type send_timeout: int + :param keep_alive: The time interval in seconds between pinging the connection to keep it alive during + periods of inactivity. The default value is `None`, i.e. no keep alive pings. + :type keep_alive: int + :param auto_reconnect: Whether to automatically reconnect the sender if a retryable error occurs. + Default value is `True`. + :type auto_reconnect: bool + :param loop: An event loop. If not specified the default event loop will be used. + """ + self.loop = loop or asyncio.get_event_loop() + self.running = False + self.client = client + self.target = target + self.partition = partition + self.keep_alive = keep_alive + self.auto_reconnect = auto_reconnect + self.timeout = send_timeout + self.retry_policy = errors.ErrorPolicy(max_retries=3, on_error=_error_handler) + self.reconnect_backoff = 1 + self.name = "EHSender-{}".format(uuid.uuid4()) + self.redirected = None + self.error = None + if partition: + self.target += "/Partitions/" + partition + self.name += "-partition{}".format(partition) + self._handler = SendClientAsync( + self.target, + auth=self.client.get_auth(), + debug=self.client.debug, + msg_timeout=self.timeout, + error_policy=self.retry_policy, + keep_alive_interval=self.keep_alive, + client_name=self.name, + properties=self.client.create_properties(), + loop=self.loop) + self._outcome = None + self._condition = None + + async def open_async(self): + """ + Open the Sender using the supplied conneciton. + If the handler has previously been redirected, the redirect + context will be used to create a new handler before opening it. + + :param connection: The underlying client shared connection. 
+ :type: connection: ~uamqp.async_ops.connection_async.ConnectionAsync + + Example: + .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py + :start-after: [START eventhub_client_async_sender_open] + :end-before: [END eventhub_client_async_sender_open] + :language: python + :dedent: 4 + :caption: Open the Sender using the supplied conneciton. + + """ + self.running = True + if self.redirected: + self.target = self.redirected.address + self._handler = SendClientAsync( + self.target, + auth=self.client.get_auth(), + debug=self.client.debug, + msg_timeout=self.timeout, + error_policy=self.retry_policy, + keep_alive_interval=self.keep_alive, + client_name=self.name, + properties=self.client.create_properties(), + loop=self.loop) + await self._handler.open_async() + while not await self._handler.client_ready_async(): + await asyncio.sleep(0.05) + + async def _reconnect_async(self): + await self._handler.close_async() + unsent_events = self._handler.pending_messages + self._handler = SendClientAsync( + self.target, + auth=self.client.get_auth(), + debug=self.client.debug, + msg_timeout=self.timeout, + error_policy=self.retry_policy, + keep_alive_interval=self.keep_alive, + client_name=self.name, + properties=self.client.create_properties(), + loop=self.loop) + try: + await self._handler.open_async() + self._handler.queue_message(*unsent_events) + await self._handler.wait_async() + return True + except errors.TokenExpired as shutdown: + log.info("AsyncSender disconnected due to token expiry. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close_async(exception=error) + raise error + except (errors.LinkDetach, errors.ConnectionClose) as shutdown: + if shutdown.action.retry and self.auto_reconnect: + log.info("AsyncSender detached. Attempting reconnect.") + return False + log.info("AsyncSender reconnect failed. 
Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close_async(exception=error) + raise error + except errors.MessageHandlerError as shutdown: + if self.auto_reconnect: + log.info("AsyncSender detached. Attempting reconnect.") + return False + log.info("AsyncSender reconnect failed. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close_async(exception=error) + raise error + except errors.AMQPConnectionError as shutdown: + if str(shutdown).startswith("Unable to open authentication session") and self.auto_reconnect: + log.info("AsyncSender couldn't authenticate. Attempting reconnect.") + return False + log.info("AsyncSender connection error (%r). Shutting down.", shutdown) + error = EventHubError(str(shutdown)) + await self.close_async(exception=error) + raise error + except Exception as e: + log.info("Unexpected error occurred (%r). Shutting down.", e) + error = EventHubError("Sender reconnect failed: {}".format(e)) + await self.close_async(exception=error) + raise error + + async def reconnect_async(self): + """If the Receiver was disconnected from the service with + a retryable error - attempt to reconnect.""" + while not await self._reconnect_async(): + await asyncio.sleep(self.reconnect_backoff) + + async def has_started(self): + """ + Whether the handler has completed all start up processes such as + establishing the connection, session, link and authentication, and + is not ready to process messages. 
+ **This function is now deprecated and will be removed in v2.0+.** + + :rtype: bool + """ + # pylint: disable=protected-access + timeout = False + auth_in_progress = False + if self._handler._connection.cbs: + timeout, auth_in_progress = await self._handler._auth.handle_token_async() + if timeout: + raise EventHubError("Authorization timeout.") + if auth_in_progress: + return False + if not await self._handler._client_ready_async(): + return False + return True + + async def close_async(self, exception=None): + """ + Close down the handler. If the handler has already closed, + this will be a no op. An optional exception can be passed in to + indicate that the handler was shutdown due to error. + + :param exception: An optional exception if the handler is closing + due to an error. + :type exception: Exception + + Example: + .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py + :start-after: [START eventhub_client_async_sender_close] + :end-before: [END eventhub_client_async_sender_close] + :language: python + :dedent: 4 + :caption: Close down the handler. + + """ + self.running = False + if self.error: + return + if isinstance(exception, errors.LinkRedirect): + self.redirected = exception + elif isinstance(exception, EventHubError): + self.error = exception + elif isinstance(exception, (errors.LinkDetach, errors.ConnectionClose)): + self.error = EventHubError(str(exception), exception) + elif exception: + self.error = EventHubError(str(exception)) + else: + self.error = EventHubError("This send handler is now closed.") + await self._handler.close_async() + + async def send(self, event_data): + """ + Sends an event data and asynchronously waits until + acknowledgement is received or operation times out. + + :param event_data: The event to be sent. + :type event_data: ~azure.eventhub.common.EventData + :raises: ~azure.eventhub.common.EventHubError if the message fails to + send. + + Example: + .. 
literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py + :start-after: [START eventhub_client_async_send] + :end-before: [END eventhub_client_async_send] + :language: python + :dedent: 4 + :caption: Sends an event data and asynchronously waits + until acknowledgement is received or operation times out. + + """ + if self.error: + raise self.error + if not self.running: + raise ValueError("Unable to send until client has been started.") + if event_data.partition_key and self.partition: + raise ValueError("EventData partition key cannot be used with a partition sender.") + event_data.message.on_send_complete = self._on_outcome + try: + await self._handler.send_message_async(event_data.message) + if self._outcome != constants.MessageSendResult.Ok: + raise Sender._error(self._outcome, self._condition) + except (errors.TokenExpired, errors.AuthenticationException): + log.info("AsyncSender disconnected due to token error. Attempting reconnect.") + await self.reconnect_async() + except (errors.LinkDetach, errors.ConnectionClose) as shutdown: + if shutdown.action.retry and self.auto_reconnect: + log.info("AsyncSender detached. Attempting reconnect.") + await self.reconnect_async() + else: + log.info("AsyncSender detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close_async(exception=error) + raise error + except errors.MessageHandlerError as shutdown: + if self.auto_reconnect: + log.info("AsyncSender detached. Attempting reconnect.") + await self.reconnect_async() + else: + log.info("AsyncSender detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close_async(exception=error) + raise error + except Exception as e: + log.info("Unexpected error occurred (%r). 
Shutting down.", e) + error = EventHubError("Send failed: {}".format(e)) + await self.close_async(exception=error) + raise error + else: + return self._outcome + + async def wait_async(self): + """ + Wait until all transferred events have been sent. + """ + if self.error: + raise self.error + if not self.running: + raise ValueError("Unable to send until client has been started.") + try: + await self._handler.wait_async() + except (errors.TokenExpired, errors.AuthenticationException): + log.info("AsyncSender disconnected due to token error. Attempting reconnect.") + await self.reconnect_async() + except (errors.LinkDetach, errors.ConnectionClose) as shutdown: + if shutdown.action.retry and self.auto_reconnect: + log.info("AsyncSender detached. Attempting reconnect.") + await self.reconnect_async() + else: + log.info("AsyncSender detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close_async(exception=error) + raise error + except errors.MessageHandlerError as shutdown: + if self.auto_reconnect: + log.info("AsyncSender detached. Attempting reconnect.") + await self.reconnect_async() + else: + log.info("AsyncSender detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close_async(exception=error) + raise error + except Exception as e: + log.info("Unexpected error occurred (%r).", e) + raise EventHubError("Send failed: {}".format(e)) diff --git a/azure-eventhubs/azure/eventhub/client.py b/azure-eventhubs/azure/eventhub/client.py new file mode 100644 index 000000000000..a50babfca8c3 --- /dev/null +++ b/azure-eventhubs/azure/eventhub/client.py @@ -0,0 +1,551 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# --------------------------------------------------------------------------------------------
+from __future__ import unicode_literals
+
+import logging
+import datetime
+import sys
+import uuid
+import time
+import functools
+try:
+    from urlparse import urlparse
+    from urllib import unquote_plus, urlencode, quote_plus
+except ImportError:
+    from urllib.parse import urlparse, unquote_plus, urlencode, quote_plus
+
+import uamqp
+from uamqp import Message
+from uamqp import authentication
+from uamqp import constants
+
+from azure.eventhub import __version__
+from azure.eventhub.sender import Sender
+from azure.eventhub.receiver import Receiver
+from azure.eventhub.common import EventHubError, parse_sas_token
+
+
+log = logging.getLogger(__name__)
+
+
+def _parse_conn_str(conn_str):
+    endpoint = None
+    shared_access_key_name = None
+    shared_access_key = None
+    entity_path = None
+    for element in conn_str.split(';'):
+        key, _, value = element.partition('=')
+        if key.lower() == 'endpoint':
+            endpoint = value.rstrip('/')
+        elif key.lower() == 'hostname':
+            endpoint = value.rstrip('/')
+        elif key.lower() == 'sharedaccesskeyname':
+            shared_access_key_name = value
+        elif key.lower() == 'sharedaccesskey':
+            shared_access_key = value
+        elif key.lower() == 'entitypath':
+            entity_path = value
+    if not all([endpoint, shared_access_key_name, shared_access_key]):
+        raise ValueError("Invalid connection string")
+    return endpoint, shared_access_key_name, shared_access_key, entity_path
+
+
+def _generate_sas_token(uri, policy, key, expiry=None):
+    """Create a shared access signature token as a string literal.
+    :returns: SAS token as string literal.
+    :rtype: str
+    """
+    from base64 import b64encode, b64decode
+    from hashlib import sha256
+    from hmac import HMAC
+    if not expiry:
+        expiry = time.time() + 3600  # Default to 1 hour.
+ encoded_uri = quote_plus(uri) + ttl = int(expiry) + sign_key = '%s\n%d' % (encoded_uri, ttl) + signature = b64encode(HMAC(b64decode(key), sign_key.encode('utf-8'), sha256).digest()) + result = { + 'sr': uri, + 'sig': signature, + 'se': str(ttl)} + if policy: + result['skn'] = policy + return 'SharedAccessSignature ' + urlencode(result) + + +def _build_uri(address, entity): + parsed = urlparse(address) + if parsed.path: + return address + if not entity: + raise ValueError("No EventHub specified") + address += "/" + str(entity) + return address + + +class EventHubClient(object): + """ + The EventHubClient class defines a high level interface for sending + events to and receiving events from the Azure Event Hubs service. + + Example: + .. literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START create_eventhub_client] + :end-before: [END create_eventhub_client] + :language: python + :dedent: 4 + :caption: Create a new instance of the Event Hub client + + """ + + def __init__( + self, address, username=None, password=None, debug=False, + http_proxy=None, auth_timeout=60, sas_token=None): + """ + Constructs a new EventHubClient with the given address URL. + + :param address: The full URI string of the Event Hub. This can optionally + include URL-encoded access name and key. + :type address: str + :param username: The name of the shared access policy. This must be supplied + if not encoded into the address. + :type username: str + :param password: The shared access key. This must be supplied if not encoded + into the address. + :type password: str + :param debug: Whether to output network trace logs to the logger. Default + is `False`. + :type debug: bool + :param http_proxy: HTTP proxy settings. This must be a dictionary with the following + keys: 'proxy_hostname' (str value) and 'proxy_port' (int value). + Additionally the following keys may also be present: 'username', 'password'. 
+ :type http_proxy: dict[str, Any] + :param auth_timeout: The time in seconds to wait for a token to be authorized by the service. + The default value is 60 seconds. If set to 0, no timeout will be enforced from the client. + :type auth_timeout: int + :param sas_token: A SAS token or function that returns a SAS token. If a function is supplied, + it will be used to retrieve subsequent tokens in the case of token expiry. The function should + take no arguments. + :type sas_token: str or callable + """ + self.container_id = "eventhub.pysdk-" + str(uuid.uuid4())[:8] + self.sas_token = sas_token + self.address = urlparse(address) + self.eh_name = self.address.path.lstrip('/') + self.http_proxy = http_proxy + self.mgmt_target = "amqps://{}/{}".format(self.address.hostname, self.eh_name) + url_username = unquote_plus(self.address.username) if self.address.username else None + username = username or url_username + url_password = unquote_plus(self.address.password) if self.address.password else None + password = password or url_password + if (not username or not password) and not sas_token: + raise ValueError("Please supply either username and password, or a SAS token") + self.auth_uri = "sb://{}{}".format(self.address.hostname, self.address.path) + self._auth_config = {'username': username, 'password': password} + self.get_auth = functools.partial(self._create_auth) + self.debug = debug + self.auth_timeout = auth_timeout + + self.clients = [] + self.stopped = False + log.info("%r: Created the Event Hub client", self.container_id) + + @classmethod + def from_sas_token(cls, address, sas_token, eventhub=None, **kwargs): + """Create an EventHubClient from an existing auth token or token generator. + + :param address: The Event Hub address URL + :type address: str + :param sas_token: A SAS token or function that returns a SAS token. If a function is supplied, + it will be used to retrieve subsequent tokens in the case of token expiry. The function should + take no arguments. 
+ :type sas_token: str or callable + :param eventhub: The name of the EventHub, if not already included in the address URL. + :type eventhub: str + :param debug: Whether to output network trace logs to the logger. Default + is `False`. + :type debug: bool + :param http_proxy: HTTP proxy settings. This must be a dictionary with the following + keys: 'proxy_hostname' (str value) and 'proxy_port' (int value). + Additionally the following keys may also be present: 'username', 'password'. + :type http_proxy: dict[str, Any] + :param auth_timeout: The time in seconds to wait for a token to be authorized by the service. + The default value is 60 seconds. If set to 0, no timeout will be enforced from the client. + :type auth_timeout: int + + Example: + .. literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START create_eventhub_client_sas_token] + :end-before: [END create_eventhub_client_sas_token] + :language: python + :dedent: 4 + :caption: Create an EventHubClient from an existing auth token or token generator. + + """ + address = _build_uri(address, eventhub) + return cls(address, sas_token=sas_token, **kwargs) + + @classmethod + def from_connection_string(cls, conn_str, eventhub=None, **kwargs): + """Create an EventHubClient from a connection string. + + :param conn_str: The connection string. + :type conn_str: str + :param eventhub: The name of the EventHub, if the EntityName is + not included in the connection string. + :type eventhub: str + :param debug: Whether to output network trace logs to the logger. Default + is `False`. + :type debug: bool + :param http_proxy: HTTP proxy settings. This must be a dictionary with the following + keys: 'proxy_hostname' (str value) and 'proxy_port' (int value). + Additionally the following keys may also be present: 'username', 'password'. + :type http_proxy: dict[str, Any] + :param auth_timeout: The time in seconds to wait for a token to be authorized by the service. + The default value is 60 seconds. 
If set to 0, no timeout will be enforced from the client. + :type auth_timeout: int + + Example: + .. literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START create_eventhub_client_connstr] + :end-before: [END create_eventhub_client_connstr] + :language: python + :dedent: 4 + :caption: Create an EventHubClient from a connection string. + + """ + address, policy, key, entity = _parse_conn_str(conn_str) + entity = eventhub or entity + address = _build_uri(address, entity) + return cls(address, username=policy, password=key, **kwargs) + + @classmethod + def from_iothub_connection_string(cls, conn_str, **kwargs): + """ + Create an EventHubClient from an IoTHub connection string. + + :param conn_str: The connection string. + :type conn_str: str + :param debug: Whether to output network trace logs to the logger. Default + is `False`. + :type debug: bool + :param http_proxy: HTTP proxy settings. This must be a dictionary with the following + keys: 'proxy_hostname' (str value) and 'proxy_port' (int value). + Additionally the following keys may also be present: 'username', 'password'. + :type http_proxy: dict[str, Any] + :param auth_timeout: The time in seconds to wait for a token to be authorized by the service. + The default value is 60 seconds. If set to 0, no timeout will be enforced from the client. + :type auth_timeout: int + + Example: + .. literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START create_eventhub_client_iot_connstr] + :end-before: [END create_eventhub_client_iot_connstr] + :language: python + :dedent: 4 + :caption: Create an EventHubClient from an IoTHub connection string. 
+ + """ + address, policy, key, _ = _parse_conn_str(conn_str) + hub_name = address.split('.')[0] + username = "{}@sas.root.{}".format(policy, hub_name) + password = _generate_sas_token(address, policy, key) + client = cls("amqps://" + address, username=username, password=password, **kwargs) + client._auth_config = { # pylint: disable=protected-access + 'iot_username': policy, + 'iot_password': key, + 'username': username, + 'password': password} + return client + + def _create_auth(self, username=None, password=None): + """ + Create an ~uamqp.authentication.SASTokenAuth instance to authenticate + the session. + + :param username: The name of the shared access policy. + :type username: str + :param password: The shared access key. + :type password: str + """ + if self.sas_token: + token = self.sas_token() if callable(self.sas_token) else self.sas_token + try: + expiry = int(parse_sas_token(token)['se']) + except (KeyError, TypeError, IndexError): + raise ValueError("Supplied SAS token has no valid expiry value.") + return authentication.SASTokenAuth( + self.auth_uri, self.auth_uri, token, + expires_at=expiry, + timeout=self.auth_timeout, + http_proxy=self.http_proxy) + + username = username or self._auth_config['username'] + password = password or self._auth_config['password'] + if "@sas.root" in username: + return authentication.SASLPlain( + self.address.hostname, username, password, http_proxy=self.http_proxy) + return authentication.SASTokenAuth.from_shared_access_key( + self.auth_uri, username, password, timeout=self.auth_timeout, http_proxy=self.http_proxy) + + def create_properties(self): # pylint: disable=no-self-use + """ + Format the properties with which to instantiate the connection. + This acts like a user agent over HTTP. 
+ + :rtype: dict + """ + properties = {} + properties["product"] = "eventhub.python" + properties["version"] = __version__ + properties["framework"] = "Python {}.{}.{}".format(*sys.version_info[0:3]) + properties["platform"] = sys.platform + return properties + + def _close_clients(self): + """ + Close all open Sender/Receiver clients. + """ + for client in self.clients: + client.close() + + def _start_clients(self): + for client in self.clients: + try: + if not client.running: + client.open() + except Exception as exp: # pylint: disable=broad-except + client.close(exception=exp) + + def _process_redirect_uri(self, redirect): + redirect_uri = redirect.address.decode('utf-8') + auth_uri, _, _ = redirect_uri.partition("/ConsumerGroups") + self.address = urlparse(auth_uri) + self.auth_uri = "sb://{}{}".format(self.address.hostname, self.address.path) + self.eh_name = self.address.path.lstrip('/') + self.mgmt_target = redirect_uri + + def _handle_redirect(self, redirects): + if len(redirects) != len(self.clients): + raise EventHubError("Some clients are attempting to redirect the connection.") + if not all(r.hostname == redirects[0].hostname for r in redirects): + raise EventHubError("Multiple clients attempting to redirect to different hosts.") + self._process_redirect_uri(redirects[0]) + for client in self.clients: + client.open() + + def run(self): + """ + Run the EventHubClient in blocking mode. + Opens the connection and starts running all Sender/Receiver clients. + Returns a list of the start up results. For a succcesful client start the + result will be `None`, otherwise the exception raised. + If all clients failed to start, then run will fail, shut down the connection + and raise an exception. + If at least one client starts up successfully the run command will succeed. + + :rtype: list[~azure.eventhub.common.EventHubError] + + Example: + .. 
literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START eventhub_client_run] + :end-before: [END eventhub_client_run] + :language: python + :dedent: 4 + :caption: Run the EventHubClient in blocking mode. + + """ + log.info("%r: Starting %r clients", self.container_id, len(self.clients)) + try: + self._start_clients() + redirects = [c.redirected for c in self.clients if c.redirected] + failed = [c.error for c in self.clients if c.error] + if failed and len(failed) == len(self.clients): + log.warning("%r: All clients failed to start.", self.container_id) + raise failed[0] + if failed: + log.warning("%r: %r clients failed to start.", self.container_id, len(failed)) + elif redirects: + self._handle_redirect(redirects) + except EventHubError: + self.stop() + raise + except Exception as e: + self.stop() + raise EventHubError(str(e)) + return failed + + def stop(self): + """ + Stop the EventHubClient and all its Sender/Receiver clients. + + Example: + .. literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START eventhub_client_stop] + :end-before: [END eventhub_client_stop] + :language: python + :dedent: 4 + :caption: Stop the EventHubClient and all its Sender/Receiver clients. + + """ + log.info("%r: Stopping %r clients", self.container_id, len(self.clients)) + self.stopped = True + self._close_clients() + + def get_eventhub_info(self): + """ + Get details on the specified EventHub. 
+ Keys in the details dictionary include: + + -'name' + -'type' + -'created_at' + -'partition_count' + -'partition_ids' + + :rtype: dict + """ + alt_creds = { + "username": self._auth_config.get("iot_username"), + "password":self._auth_config.get("iot_password")} + try: + mgmt_auth = self._create_auth(**alt_creds) + mgmt_client = uamqp.AMQPClient(self.mgmt_target, auth=mgmt_auth, debug=self.debug) + mgmt_client.open() + mgmt_msg = Message(application_properties={'name': self.eh_name}) + response = mgmt_client.mgmt_request( + mgmt_msg, + constants.READ_OPERATION, + op_type=b'com.microsoft:eventhub', + status_code_field=b'status-code', + description_fields=b'status-description') + eh_info = response.get_data() + output = {} + if eh_info: + output['name'] = eh_info[b'name'].decode('utf-8') + output['type'] = eh_info[b'type'].decode('utf-8') + output['created_at'] = datetime.datetime.fromtimestamp(float(eh_info[b'created_at'])/1000) + output['partition_count'] = eh_info[b'partition_count'] + output['partition_ids'] = [p.decode('utf-8') for p in eh_info[b'partition_ids']] + return output + finally: + mgmt_client.close() + + def add_receiver( + self, consumer_group, partition, offset=None, prefetch=300, + operation=None, keep_alive=30, auto_reconnect=True): + """ + Add a receiver to the client for a particular consumer group and partition. + + :param consumer_group: The name of the consumer group. + :type consumer_group: str + :param partition: The ID of the partition. + :type partition: str + :param offset: The offset from which to start receiving. + :type offset: ~azure.eventhub.common.Offset + :param prefetch: The message prefetch count of the receiver. Default is 300. + :type prefetch: int + :operation: An optional operation to be appended to the hostname in the source URL. + The value must start with `/` character. + :type operation: str + :rtype: ~azure.eventhub.receiver.Receiver + + Example: + .. 
literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START create_eventhub_client_receiver] + :end-before: [END create_eventhub_client_receiver] + :language: python + :dedent: 4 + :caption: Add a receiver to the client for a particular consumer group and partition. + + """ + path = self.address.path + operation if operation else self.address.path + source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( + self.address.hostname, path, consumer_group, partition) + handler = Receiver( + self, source_url, offset=offset, prefetch=prefetch, + keep_alive=keep_alive, auto_reconnect=auto_reconnect) + self.clients.append(handler) + return handler + + def add_epoch_receiver( + self, consumer_group, partition, epoch, prefetch=300, + operation=None, keep_alive=30, auto_reconnect=True): + """ + Add a receiver to the client with an epoch value. Only a single epoch receiver + can connect to a partition at any given time - additional epoch receivers must have + a higher epoch value or they will be rejected. If a 2nd epoch receiver has + connected, the first will be closed. + + :param consumer_group: The name of the consumer group. + :type consumer_group: str + :param partition: The ID of the partition. + :type partition: str + :param epoch: The epoch value for the receiver. + :type epoch: int + :param prefetch: The message prefetch count of the receiver. Default is 300. + :type prefetch: int + :operation: An optional operation to be appended to the hostname in the source URL. + The value must start with `/` character. + :type operation: str + :rtype: ~azure.eventhub.receiver.Receiver + + Example: + .. literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START create_eventhub_client_epoch_receiver] + :end-before: [END create_eventhub_client_epoch_receiver] + :language: python + :dedent: 4 + :caption: Add a receiver to the client with an epoch value. 
+ + """ + path = self.address.path + operation if operation else self.address.path + source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( + self.address.hostname, path, consumer_group, partition) + handler = Receiver( + self, source_url, prefetch=prefetch, epoch=epoch, + keep_alive=keep_alive, auto_reconnect=auto_reconnect) + self.clients.append(handler) + return handler + + def add_sender(self, partition=None, operation=None, send_timeout=60, keep_alive=30, auto_reconnect=True): + """ + Add a sender to the client to send EventData object to an EventHub. + + :param partition: Optionally specify a particular partition to send to. + If omitted, the events will be distributed to available partitions via + round-robin. + :type parition: str + :operation: An optional operation to be appended to the hostname in the target URL. + The value must start with `/` character. + :type operation: str + :param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is + queued. Default value is 60 seconds. If set to 0, there will be no timeout. + :type send_timeout: int + :param keep_alive: The time interval in seconds between pinging the connection to keep it alive during + periods of inactivity. The default value is 30 seconds. If set to `None`, the connection will not + be pinged. + :type keep_alive: int + :param auto_reconnect: Whether to automatically reconnect the sender if a retryable error occurs. + Default value is `True`. + :rtype: ~azure.eventhub.sender.Sender + + Example: + .. literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START create_eventhub_client_sender] + :end-before: [END create_eventhub_client_sender] + :language: python + :dedent: 4 + :caption: Add a sender to the client to send EventData object to an EventHub. 
+ + """ + target = "amqps://{}{}".format(self.address.hostname, self.address.path) + if operation: + target = target + operation + handler = Sender( + self, target, partition=partition, send_timeout=send_timeout, + keep_alive=keep_alive, auto_reconnect=auto_reconnect) + self.clients.append(handler) + return handler diff --git a/azure-eventhubs/azure/eventhub/common.py b/azure-eventhubs/azure/eventhub/common.py new file mode 100644 index 000000000000..76e315d2a25e --- /dev/null +++ b/azure-eventhubs/azure/eventhub/common.py @@ -0,0 +1,356 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- +from __future__ import unicode_literals + +import datetime +import calendar +import json + +import six + +from uamqp import Message, BatchMessage +from uamqp import types, constants, errors +from uamqp.message import MessageHeader, MessageProperties + +_NO_RETRY_ERRORS = ( + b"com.microsoft:argument-out-of-range", + b"com.microsoft:entity-disabled", + b"com.microsoft:auth-failed", + b"com.microsoft:precondition-failed", + b"com.microsoft:argument-error" +) + +def _error_handler(error): + """ + Called internally when an event has failed to send so we + can parse the error to determine whether we should attempt + to retry sending the event again. + Returns the action to take according to error type. + + :param error: The error received in the send attempt. 
def _error_handler(error):
    """
    Called internally when an event has failed to send so we
    can parse the error to determine whether we should attempt
    to retry sending the event again.
    Returns the action to take according to error type.

    :param error: The error received in the send attempt.
    :type error: Exception
    :rtype: ~uamqp.errors.ErrorAction
    """
    condition = error.condition
    # Transient service conditions: retry, with a backoff where the service
    # is signalling load or shutdown.
    if condition == b'com.microsoft:server-busy':
        return errors.ErrorAction(retry=True, backoff=4)
    if condition == b'com.microsoft:timeout':
        return errors.ErrorAction(retry=True, backoff=2)
    if condition == b'com.microsoft:operation-cancelled':
        return errors.ErrorAction(retry=True)
    if condition == b"com.microsoft:container-close":
        return errors.ErrorAction(retry=True, backoff=4)
    # Permanent failures (bad arguments, auth failures, disabled entity).
    if condition in _NO_RETRY_ERRORS:
        return errors.ErrorAction(retry=False)
    return errors.ErrorAction(retry=True)


def parse_sas_token(sas_token):
    """Parse a SAS token into its components.

    :param sas_token: The SAS token.
    :type sas_token: str
    :rtype: dict[str, str]
    """
    # Drop the leading scheme (e.g. "SharedAccessSignature ") and split the
    # remaining query-style fields into a lower-cased key/value mapping.
    fields = sas_token.partition(' ')[2]
    return {
        key.lower(): value
        for key, value in (field.split('=', 1) for field in fields.split('&'))}


class EventData(object):
    """
    The EventData class is a holder of event content.
    Acts as a wrapper to an uamqp.message.Message object.

    Example:
        .. literalinclude:: ../examples/test_examples_eventhub.py
            :start-after: [START create_event_data]
            :end-before: [END create_event_data]
            :language: python
            :dedent: 4
            :caption: Create instances of EventData

    """

    # Well-known AMQP annotation keys set by the Event Hubs service.
    PROP_SEQ_NUMBER = b"x-opt-sequence-number"
    PROP_OFFSET = b"x-opt-offset"
    PROP_PARTITION_KEY = b"x-opt-partition-key"
    PROP_TIMESTAMP = b"x-opt-enqueued-time"
    PROP_DEVICE_ID = b"iothub-connection-device-id"

    def __init__(self, body=None, batch=None, to_device=None, message=None):
        """
        Initialize EventData.

        :param body: The data to send in a single message.
        :type body: str, bytes or list
        :param batch: A data generator to send batched messages.
        :type batch: Generator
        :param to_device: An IoT device to route to.
        :type to_device: str
        :param message: The received message.
        :type message: ~uamqp.message.Message
        """
        self._partition_key = types.AMQPSymbol(EventData.PROP_PARTITION_KEY)
        self._annotations = {}
        self._app_properties = {}
        self.msg_properties = MessageProperties()
        if to_device:
            self.msg_properties.to = '/devices/{}/messages/devicebound'.format(to_device)
        if batch:
            self.message = BatchMessage(data=batch, multi_messages=True, properties=self.msg_properties)
        elif message:
            # Wrapping a received message: mirror its properties/annotations.
            self.message = message
            self.msg_properties = message.properties
            self._annotations = message.annotations
            self._app_properties = message.application_properties
        else:
            if isinstance(body, list) and body:
                # A list becomes a single multi-part message body.
                first, remainder = body[0], body[1:]
                self.message = Message(first, properties=self.msg_properties)
                for extra in remainder:
                    self.message._body.append(extra)  # pylint: disable=protected-access
            elif body is None:
                raise ValueError("EventData cannot be None.")
            else:
                self.message = Message(body, properties=self.msg_properties)
+ :type message: ~uamqp.message.Message + """ + self._partition_key = types.AMQPSymbol(EventData.PROP_PARTITION_KEY) + self._annotations = {} + self._app_properties = {} + self.msg_properties = MessageProperties() + if to_device: + self.msg_properties.to = '/devices/{}/messages/devicebound'.format(to_device) + if batch: + self.message = BatchMessage(data=batch, multi_messages=True, properties=self.msg_properties) + elif message: + self.message = message + self.msg_properties = message.properties + self._annotations = message.annotations + self._app_properties = message.application_properties + else: + if isinstance(body, list) and body: + self.message = Message(body[0], properties=self.msg_properties) + for more in body[1:]: + self.message._body.append(more) # pylint: disable=protected-access + elif body is None: + raise ValueError("EventData cannot be None.") + else: + self.message = Message(body, properties=self.msg_properties) + + @property + def sequence_number(self): + """ + The sequence number of the event data object. + + :rtype: int or long + """ + return self._annotations.get(EventData.PROP_SEQ_NUMBER, None) + + @property + def offset(self): + """ + The offset of the event data object. + + :rtype: ~azure.eventhub.common.Offset + """ + try: + return Offset(self._annotations[EventData.PROP_OFFSET].decode('UTF-8')) + except (KeyError, AttributeError): + return None + + @property + def enqueued_time(self): + """ + The enqueued timestamp of the event data object. + + :rtype: datetime.datetime + """ + timestamp = self._annotations.get(EventData.PROP_TIMESTAMP, None) + if timestamp: + return datetime.datetime.utcfromtimestamp(float(timestamp)/1000) + return None + + @property + def device_id(self): + """ + The device ID of the event data object. This is only used for + IoT Hub implementations. 
+ + :rtype: bytes + """ + return self._annotations.get(EventData.PROP_DEVICE_ID, None) + + @property + def partition_key(self): + """ + The partition key of the event data object. + + :rtype: bytes + """ + try: + return self._annotations[self._partition_key] + except KeyError: + return self._annotations.get(EventData.PROP_PARTITION_KEY, None) + + @partition_key.setter + def partition_key(self, value): + """ + Set the partition key of the event data object. + + :param value: The partition key to set. + :type value: str or bytes + """ + annotations = dict(self._annotations) + annotations[self._partition_key] = value + header = MessageHeader() + header.durable = True + self.message.annotations = annotations + self.message.header = header + self._annotations = annotations + + @property + def application_properties(self): + """ + Application defined properties on the message. + + :rtype: dict + """ + return self._app_properties + + @application_properties.setter + def application_properties(self, value): + """ + Application defined properties on the message. + + :param value: The application properties for the EventData. + :type value: dict + """ + self._app_properties = value + properties = dict(self._app_properties) + self.message.application_properties = properties + + @property + def body(self): + """ + The body of the event data object. + + :rtype: bytes or Generator[bytes] + """ + try: + return self.message.get_data() + except TypeError: + raise ValueError("Message data empty.") + + def body_as_str(self, encoding='UTF-8'): + """ + The body of the event data as a string if the data is of a + compatible type. + + :param encoding: The encoding to use for decoding message data. 
class Offset(object):
    """
    The offset (position or timestamp) where a receiver starts. Examples:

    Beginning of the event stream:
      >>> offset = Offset("-1")
    End of the event stream:
      >>> offset = Offset("@latest")
    Events after the specified offset:
      >>> offset = Offset("12345")
    Events from the specified offset:
      >>> offset = Offset("12345", True)
    Events after a datetime:
      >>> offset = Offset(datetime.datetime.utcnow())
    Events after a specific sequence number:
      >>> offset = Offset(1506968696002)
    """

    def __init__(self, value, inclusive=False):
        """
        Initialize Offset.

        :param value: The offset value.
        :type value: ~datetime.datetime or int or str
        :param inclusive: Whether to include the supplied value as the start point.
        :type inclusive: bool
        """
        self.value = value
        self.inclusive = inclusive

    def selector(self):
        """
        Creates a selector expression of the offset.

        :rtype: bytes
        """
        # '>=' includes the anchor event itself; '>' starts just after it.
        comparison = ">=" if self.inclusive else ">"
        if isinstance(self.value, datetime.datetime):
            # Convert to a millisecond-precision UTC epoch timestamp.
            milliseconds = (calendar.timegm(self.value.utctimetuple()) * 1000) + (self.value.microsecond/1000)
            expression = "amqp.annotation.x-opt-enqueued-time {} '{}'".format(comparison, int(milliseconds))
        elif isinstance(self.value, six.integer_types):
            expression = "amqp.annotation.x-opt-sequence-number {} '{}'".format(comparison, self.value)
        else:
            expression = "amqp.annotation.x-opt-offset {} '{}'".format(comparison, self.value)
        return expression.encode('utf-8')


class EventHubError(Exception):
    """
    Represents an error happened in the client.

    :ivar message: The error message.
    :vartype message: str
    :ivar error: The error condition, if available.
    :vartype error: str
    :ivar details: The error details, if included in the
     service response.
    :vartype details: dict[str, str]
    """

    def __init__(self, message, details=None):
        self.error = None
        self.message = message
        self.details = details
        if isinstance(message, constants.MessageSendResult):
            self.message = "Message send failed with result: {}".format(message)
        if details and isinstance(details, Exception):
            # Extract the AMQP error condition; it may be wrapped in a
            # symbol-like object (with .value) or be raw bytes.
            try:
                error_condition = details.condition.value.decode('UTF-8')
            except AttributeError:
                error_condition = details.condition.decode('UTF-8')
            _, _, self.error = error_condition.partition(':')
            self.message += "\nError: {}".format(self.error)
            try:
                self._parse_error(details.description)
                for detail in self.details:
                    self.message += "\n{}".format(detail)
            except:  # pylint: disable=bare-except
                # Best effort: fall back to the raw details on any parse error.
                self.message += "\n{}".format(details)
        super(EventHubError, self).__init__(self.message)

    def _parse_error(self, error_list):
        # Split a service error description into its tracking components
        # (reference, tracking ID, system tracker, timestamp).
        parsed_details = []
        self.message = error_list if isinstance(error_list, six.text_type) else error_list.decode('UTF-8')
        details_index = self.message.find(" Reference:")
        if details_index >= 0:
            details_msg = self.message[details_index + 1:]
            self.message = self.message[0:details_index]

            tracking_index = details_msg.index(", TrackingId:")
            system_index = details_msg.index(", SystemTracker:")
            timestamp_index = details_msg.index(", Timestamp:")
            parsed_details.append(details_msg[:tracking_index])
            parsed_details.append(details_msg[tracking_index + 2: system_index])
            parsed_details.append(details_msg[system_index + 2: timestamp_index])
            parsed_details.append(details_msg[timestamp_index + 2:])
            self.details = parsed_details
self.message[0:details_index] + + tracking_index = details_msg.index(", TrackingId:") + system_index = details_msg.index(", SystemTracker:") + timestamp_index = details_msg.index(", Timestamp:") + details.append(details_msg[:tracking_index]) + details.append(details_msg[tracking_index + 2: system_index]) + details.append(details_msg[system_index + 2: timestamp_index]) + details.append(details_msg[timestamp_index + 2:]) + self.details = details diff --git a/azure-eventhubs/azure/eventhub/receiver.py b/azure-eventhubs/azure/eventhub/receiver.py new file mode 100644 index 000000000000..486c75b3c682 --- /dev/null +++ b/azure-eventhubs/azure/eventhub/receiver.py @@ -0,0 +1,329 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- +from __future__ import unicode_literals + +import uuid +import logging +import time + +from uamqp import types, errors +from uamqp import ReceiveClient, Source + +from azure.eventhub.common import EventHubError, EventData, _error_handler + + +log = logging.getLogger(__name__) + + +class Receiver(object): + """ + Implements a Receiver. + + Example: + .. literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START create_eventhub_client_receiver_instance] + :end-before: [END create_eventhub_client_receiver_instance] + :language: python + :dedent: 4 + :caption: Create a new instance of the Receiver. + + """ + timeout = 0 + _epoch = b'com.microsoft:epoch' + + def __init__(self, client, source, offset=None, prefetch=300, epoch=None, keep_alive=None, auto_reconnect=True): + """ + Instantiate a receiver. + + :param client: The parent EventHubClient. 
+ :type client: ~azure.eventhub.client.EventHubClient + :param source: The source EventHub from which to receive events. + :type source: str + :param prefetch: The number of events to prefetch from the service + for processing. Default is 300. + :type prefetch: int + :param epoch: An optional epoch value. + :type epoch: int + """ + self.running = False + self.client = client + self.source = source + self.offset = offset + self.prefetch = prefetch + self.epoch = epoch + self.keep_alive = keep_alive + self.auto_reconnect = auto_reconnect + self.retry_policy = errors.ErrorPolicy(max_retries=3, on_error=_error_handler) + self.reconnect_backoff = 1 + self.properties = None + self.redirected = None + self.error = None + partition = self.source.split('/')[-1] + self.name = "EHReceiver-{}-partition{}".format(uuid.uuid4(), partition) + source = Source(self.source) + if self.offset is not None: + source.set_filter(self.offset.selector()) + if epoch: + self.properties = {types.AMQPSymbol(self._epoch): types.AMQPLong(int(epoch))} + self._handler = ReceiveClient( + source, + auth=self.client.get_auth(), + debug=self.client.debug, + prefetch=self.prefetch, + link_properties=self.properties, + timeout=self.timeout, + error_policy=self.retry_policy, + keep_alive_interval=self.keep_alive, + client_name=self.name, + properties=self.client.create_properties()) + + def open(self): + """ + Open the Receiver using the supplied conneciton. + If the handler has previously been redirected, the redirect + context will be used to create a new handler before opening it. + + :param connection: The underlying client shared connection. + :type: connection: ~uamqp.connection.Connection + + Example: + .. literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START eventhub_client_receiver_open] + :end-before: [END eventhub_client_receiver_open] + :language: python + :dedent: 4 + :caption: Open the Receiver using the supplied conneciton. 
+ + """ + # pylint: disable=protected-access + self.running = True + if self.redirected: + self.source = self.redirected.address + source = Source(self.source) + if self.offset is not None: + source.set_filter(self.offset.selector()) + alt_creds = { + "username": self.client._auth_config.get("iot_username"), + "password":self.client._auth_config.get("iot_password")} + self._handler = ReceiveClient( + source, + auth=self.client.get_auth(**alt_creds), + debug=self.client.debug, + prefetch=self.prefetch, + link_properties=self.properties, + timeout=self.timeout, + error_policy=self.retry_policy, + keep_alive_interval=self.keep_alive, + client_name=self.name, + properties=self.client.create_properties()) + self._handler.open() + while not self._handler.client_ready(): + time.sleep(0.05) + + def _reconnect(self): # pylint: disable=too-many-statements + # pylint: disable=protected-access + alt_creds = { + "username": self.client._auth_config.get("iot_username"), + "password": self.client._auth_config.get("iot_password")} + self._handler.close() + source = Source(self.source) + if self.offset is not None: + source.set_filter(self.offset.selector()) + self._handler = ReceiveClient( + source, + auth=self.client.get_auth(**alt_creds), + debug=self.client.debug, + prefetch=self.prefetch, + link_properties=self.properties, + timeout=self.timeout, + error_policy=self.retry_policy, + keep_alive_interval=self.keep_alive, + client_name=self.name, + properties=self.client.create_properties()) + try: + self._handler.open() + while not self._handler.client_ready(): + time.sleep(0.05) + return True + except errors.TokenExpired as shutdown: + log.info("Receiver disconnected due to token expiry. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + self.close(exception=error) + raise error + except (errors.LinkDetach, errors.ConnectionClose) as shutdown: + if shutdown.action.retry and self.auto_reconnect: + log.info("Receiver detached. 
Attempting reconnect.") + return False + log.info("Receiver detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + self.close(exception=error) + raise error + except errors.MessageHandlerError as shutdown: + if self.auto_reconnect: + log.info("Receiver detached. Attempting reconnect.") + return False + log.info("Receiver detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + self.close(exception=error) + raise error + except errors.AMQPConnectionError as shutdown: + if str(shutdown).startswith("Unable to open authentication session") and self.auto_reconnect: + log.info("Receiver couldn't authenticate. Attempting reconnect.") + return False + log.info("Receiver connection error (%r). Shutting down.", shutdown) + error = EventHubError(str(shutdown)) + self.close(exception=error) + raise error + except Exception as e: + log.info("Unexpected error occurred (%r). Shutting down.", e) + error = EventHubError("Receiver reconnect failed: {}".format(e)) + self.close(exception=error) + raise error + + def reconnect(self): + """If the Receiver was disconnected from the service with + a retryable error - attempt to reconnect.""" + while not self._reconnect(): + time.sleep(self.reconnect_backoff) + + def get_handler_state(self): + """ + Get the state of the underlying handler with regards to start + up processes. + + :rtype: ~uamqp.constants.MessageReceiverState + """ + # pylint: disable=protected-access + return self._handler._message_receiver.get_state() + + def has_started(self): + """ + Whether the handler has completed all start up processes such as + establishing the connection, session, link and authentication, and + is not ready to process messages. 
+ **This function is now deprecated and will be removed in v2.0+.** + + :rtype: bool + """ + # pylint: disable=protected-access + timeout = False + auth_in_progress = False + if self._handler._connection.cbs: + timeout, auth_in_progress = self._handler._auth.handle_token() + if timeout: + raise EventHubError("Authorization timeout.") + if auth_in_progress: + return False + if not self._handler._client_ready(): + return False + return True + + def close(self, exception=None): + """ + Close down the handler. If the handler has already closed, + this will be a no op. An optional exception can be passed in to + indicate that the handler was shutdown due to error. + + :param exception: An optional exception if the handler is closing + due to an error. + :type exception: Exception + + Example: + .. literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START eventhub_client_receiver_close] + :end-before: [END eventhub_client_receiver_close] + :language: python + :dedent: 4 + :caption: Close down the handler. + + """ + self.running = False + if self.error: + return + if isinstance(exception, errors.LinkRedirect): + self.redirected = exception + elif isinstance(exception, EventHubError): + self.error = exception + elif exception: + self.error = EventHubError(str(exception)) + else: + self.error = EventHubError("This receive handler is now closed.") + self._handler.close() + + @property + def queue_size(self): + """ + The current size of the unprocessed Event queue. + + :rtype: int + """ + # pylint: disable=protected-access + if self._handler._received_messages: + return self._handler._received_messages.qsize() + return 0 + + def receive(self, max_batch_size=None, timeout=None): + """ + Receive events from the EventHub. + + :param max_batch_size: Receive a batch of events. Batch size will + be up to the maximum specified, but will return as soon as service + returns no new events. 
If combined with a timeout and no events are + retrieve before the time, the result will be empty. If no batch + size is supplied, the prefetch size will be the maximum. + :type max_batch_size: int + :rtype: list[~azure.eventhub.common.EventData] + + Example: + .. literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START eventhub_client_sync_receive] + :end-before: [END eventhub_client_sync_receive] + :language: python + :dedent: 4 + :caption: Receive events from the EventHub. + + """ + if self.error: + raise self.error + if not self.running: + raise ValueError("Unable to receive until client has been started.") + data_batch = [] + try: + timeout_ms = 1000 * timeout if timeout else 0 + message_batch = self._handler.receive_message_batch( + max_batch_size=max_batch_size, + timeout=timeout_ms) + for message in message_batch: + event_data = EventData(message=message) + self.offset = event_data.offset + data_batch.append(event_data) + return data_batch + except (errors.TokenExpired, errors.AuthenticationException): + log.info("Receiver disconnected due to token error. Attempting reconnect.") + self.reconnect() + return data_batch + except (errors.LinkDetach, errors.ConnectionClose) as shutdown: + if shutdown.action.retry and self.auto_reconnect: + log.info("Receiver detached. Attempting reconnect.") + self.reconnect() + return data_batch + log.info("Receiver detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + self.close(exception=error) + raise error + except errors.MessageHandlerError as shutdown: + if self.auto_reconnect: + log.info("Receiver detached. Attempting reconnect.") + self.reconnect() + return data_batch + log.info("Receiver detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + self.close(exception=error) + raise error + except Exception as e: + log.info("Unexpected error occurred (%r). 
Shutting down.", e) + error = EventHubError("Receive failed: {}".format(e)) + self.close(exception=error) + raise error diff --git a/azure-eventhubs/azure/eventhub/sender.py b/azure-eventhubs/azure/eventhub/sender.py new file mode 100644 index 000000000000..0a7334050a5f --- /dev/null +++ b/azure-eventhubs/azure/eventhub/sender.py @@ -0,0 +1,388 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- +from __future__ import unicode_literals + +import uuid +import logging +import time + +from uamqp import constants, errors +from uamqp import SendClient + +from azure.eventhub.common import EventHubError, _error_handler + +log = logging.getLogger(__name__) + + +class Sender(object): + """ + Implements a Sender. + + Example: + .. literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START create_eventhub_client_sender_instance] + :end-before: [END create_eventhub_client_sender_instance] + :language: python + :dedent: 4 + :caption: Create a new instance of the Sender. + + """ + + def __init__(self, client, target, partition=None, send_timeout=60, keep_alive=None, auto_reconnect=True): + """ + Instantiate an EventHub event Sender handler. + + :param client: The parent EventHubClient. + :type client: ~azure.eventhub.client.EventHubClient. + :param target: The URI of the EventHub to send to. + :type target: str + :param partition: The specific partition ID to send to. Default is None, in which case the service + will assign to all partitions using round-robin. + :type partition: str + :param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is + queued. Default value is 60 seconds. If set to 0, there will be no timeout. 
+ :type send_timeout: int + :param keep_alive: The time interval in seconds between pinging the connection to keep it alive during + periods of inactivity. The default value is None, i.e. no keep alive pings. + :type keep_alive: int + :param auto_reconnect: Whether to automatically reconnect the sender if a retryable error occurs. + Default value is `True`. + :type auto_reconnect: bool + """ + self.running = False + self.client = client + self.target = target + self.partition = partition + self.timeout = send_timeout + self.redirected = None + self.error = None + self.keep_alive = keep_alive + self.auto_reconnect = auto_reconnect + self.retry_policy = errors.ErrorPolicy(max_retries=3, on_error=_error_handler) + self.reconnect_backoff = 1 + self.name = "EHSender-{}".format(uuid.uuid4()) + if partition: + self.target += "/Partitions/" + partition + self.name += "-partition{}".format(partition) + self._handler = SendClient( + self.target, + auth=self.client.get_auth(), + debug=self.client.debug, + msg_timeout=self.timeout, + error_policy=self.retry_policy, + keep_alive_interval=self.keep_alive, + client_name=self.name, + properties=self.client.create_properties()) + self._outcome = None + self._condition = None + + def open(self): + """ + Open the Sender using the supplied connection. + If the handler has previously been redirected, the redirect + context will be used to create a new handler before opening it. + + :param connection: The underlying client shared connection. + :type: connection: ~uamqp.connection.Connection + + Example: + .. literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START eventhub_client_sender_open] + :end-before: [END eventhub_client_sender_open] + :language: python + :dedent: 4 + :caption: Open the Sender using the supplied connection. 
+ + """ + self.running = True + if self.redirected: + self.target = self.redirected.address + self._handler = SendClient( + self.target, + auth=self.client.get_auth(), + debug=self.client.debug, + msg_timeout=self.timeout, + error_policy=self.retry_policy, + keep_alive_interval=self.keep_alive, + client_name=self.name, + properties=self.client.create_properties()) + self._handler.open() + while not self._handler.client_ready(): + time.sleep(0.05) + + def _reconnect(self): + # pylint: disable=protected-access + self._handler.close() + unsent_events = self._handler.pending_messages + self._handler = SendClient( + self.target, + auth=self.client.get_auth(), + debug=self.client.debug, + msg_timeout=self.timeout, + error_policy=self.retry_policy, + keep_alive_interval=self.keep_alive, + client_name=self.name, + properties=self.client.create_properties()) + try: + self._handler.open() + self._handler.queue_message(*unsent_events) + self._handler.wait() + return True + except errors.TokenExpired as shutdown: + log.info("Sender disconnected due to token expiry. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + self.close(exception=error) + raise error + except (errors.LinkDetach, errors.ConnectionClose) as shutdown: + if shutdown.action.retry and self.auto_reconnect: + log.info("Sender detached. Attempting reconnect.") + return False + log.info("Sender reconnect failed. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + self.close(exception=error) + raise error + except errors.MessageHandlerError as shutdown: + if self.auto_reconnect: + log.info("Sender detached. Attempting reconnect.") + return False + log.info("Sender reconnect failed. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + self.close(exception=error) + raise error + except errors.AMQPConnectionError as shutdown: + if str(shutdown).startswith("Unable to open authentication session") and self.auto_reconnect: + log.info("Sender couldn't authenticate. 
Attempting reconnect.") + return False + log.info("Sender connection error (%r). Shutting down.", shutdown) + error = EventHubError(str(shutdown)) + self.close(exception=error) + raise error + except Exception as e: + log.info("Unexpected error occurred (%r). Shutting down.", e) + error = EventHubError("Sender Reconnect failed: {}".format(e)) + self.close(exception=error) + raise error + + def reconnect(self): + """If the Sender was disconnected from the service with + a retryable error - attempt to reconnect.""" + while not self._reconnect(): + time.sleep(self.reconnect_backoff) + + def get_handler_state(self): + """ + Get the state of the underlying handler with regards to start + up processes. + + :rtype: ~uamqp.constants.MessageSenderState + """ + # pylint: disable=protected-access + return self._handler._message_sender.get_state() + + def has_started(self): + """ + Whether the handler has completed all start up processes such as + establishing the connection, session, link and authentication, and + is now ready to process messages. + **This function is now deprecated and will be removed in v2.0+.** + + :rtype: bool + """ + # pylint: disable=protected-access + timeout = False + auth_in_progress = False + if self._handler._connection.cbs: + timeout, auth_in_progress = self._handler._auth.handle_token() + if timeout: + raise EventHubError("Authorization timeout.") + if auth_in_progress: + return False + if not self._handler._client_ready(): + return False + return True + + def close(self, exception=None): + """ + Close down the handler. If the handler has already closed, + this will be a no op. An optional exception can be passed in to + indicate that the handler was shutdown due to error. + + :param exception: An optional exception if the handler is closing + due to an error. + :type exception: Exception + + Example: + .. 
literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START eventhub_client_sender_close] + :end-before: [END eventhub_client_sender_close] + :language: python + :dedent: 4 + :caption: Close down the handler. + + """ + self.running = False + if self.error: + return + if isinstance(exception, errors.LinkRedirect): + self.redirected = exception + elif isinstance(exception, EventHubError): + self.error = exception + elif exception: + self.error = EventHubError(str(exception)) + else: + self.error = EventHubError("This send handler is now closed.") + self._handler.close() + + def send(self, event_data): + """ + Sends an event data and blocks until acknowledgement is + received or operation times out. + + :param event_data: The event to be sent. + :type event_data: ~azure.eventhub.common.EventData + :raises: ~azure.eventhub.common.EventHubError if the message fails to + send. + :return: The outcome of the message send. + :rtype: ~uamqp.constants.MessageSendResult + + Example: + .. literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START eventhub_client_sync_send] + :end-before: [END eventhub_client_sync_send] + :language: python + :dedent: 4 + :caption: Sends an event data and blocks until acknowledgement is received or operation times out. 
+ + """ + if self.error: + raise self.error + if not self.running: + raise ValueError("Unable to send until client has been started.") + if event_data.partition_key and self.partition: + raise ValueError("EventData partition key cannot be used with a partition sender.") + event_data.message.on_send_complete = self._on_outcome + try: + self._handler.send_message(event_data.message) + if self._outcome != constants.MessageSendResult.Ok: + raise Sender._error(self._outcome, self._condition) + except errors.MessageException as failed: + error = EventHubError(str(failed), failed) + self.close(exception=error) + raise error + except (errors.TokenExpired, errors.AuthenticationException): + log.info("Sender disconnected due to token error. Attempting reconnect.") + self.reconnect() + except (errors.LinkDetach, errors.ConnectionClose) as shutdown: + if shutdown.action.retry and self.auto_reconnect: + log.info("Sender detached. Attempting reconnect.") + self.reconnect() + else: + log.info("Sender detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + self.close(exception=error) + raise error + except errors.MessageHandlerError as shutdown: + if self.auto_reconnect: + log.info("Sender detached. Attempting reconnect.") + self.reconnect() + else: + log.info("Sender detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + self.close(exception=error) + raise error + except Exception as e: + log.info("Unexpected error occurred (%r). Shutting down.", e) + error = EventHubError("Send failed: {}".format(e)) + self.close(exception=error) + raise error + else: + return self._outcome + + def transfer(self, event_data, callback=None): + """ + Transfers an event data and notifies the callback when the operation is done. + + :param event_data: The event to be sent. + :type event_data: ~azure.eventhub.common.EventData + :param callback: Callback to be run once the message has been send. + This must be a function that accepts two arguments. 
+ :type callback: callable[~uamqp.constants.MessageSendResult, ~azure.eventhub.common.EventHubError] + + Example: + .. literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START eventhub_client_transfer] + :end-before: [END eventhub_client_transfer] + :language: python + :dedent: 4 + :caption: Transfers an event data and notifies the callback when the operation is done. + + """ + if self.error: + raise self.error + if not self.running: + raise ValueError("Unable to send until client has been started.") + if event_data.partition_key and self.partition: + raise ValueError("EventData partition key cannot be used with a partition sender.") + if callback: + event_data.message.on_send_complete = lambda o, c: callback(o, Sender._error(o, c)) + self._handler.queue_message(event_data.message) + + def wait(self): + """ + Wait until all transferred events have been sent. + + Example: + .. literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START eventhub_client_transfer] + :end-before: [END eventhub_client_transfer] + :language: python + :dedent: 4 + :caption: Wait until all transferred events have been sent. + + """ + if self.error: + raise self.error + if not self.running: + raise ValueError("Unable to send until client has been started.") + try: + self._handler.wait() + except (errors.TokenExpired, errors.AuthenticationException): + log.info("Sender disconnected due to token error. Attempting reconnect.") + self.reconnect() + except (errors.LinkDetach, errors.ConnectionClose) as shutdown: + if shutdown.action.retry and self.auto_reconnect: + log.info("Sender detached. Attempting reconnect.") + self.reconnect() + else: + log.info("Sender detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + self.close(exception=error) + raise error + except errors.MessageHandlerError as shutdown: + if self.auto_reconnect: + log.info("Sender detached. Attempting reconnect.") + self.reconnect() + else: + log.info("Sender detached. 
Shutting down.") + error = EventHubError(str(shutdown), shutdown) + self.close(exception=error) + raise error + except Exception as e: + log.info("Unexpected error occurred (%r).", e) + raise EventHubError("Send failed: {}".format(e)) + + def _on_outcome(self, outcome, condition): + """ + Called when the outcome is received for a delivery. + + :param outcome: The outcome of the message delivery - success or failure. + :type outcome: ~uamqp.constants.MessageSendResult + """ + self._outcome = outcome + self._condition = condition + + @staticmethod + def _error(outcome, condition): + return None if outcome == constants.MessageSendResult.Ok else EventHubError(outcome, condition) diff --git a/azure-eventhubs/azure/eventprocessorhost/__init__.py b/azure-eventhubs/azure/eventprocessorhost/__init__.py new file mode 100644 index 000000000000..c1905da23d12 --- /dev/null +++ b/azure-eventhubs/azure/eventprocessorhost/__init__.py @@ -0,0 +1,21 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +""" +The module provides a means to process Azure Event Hubs events at scale. 
+""" +try: + from azure.eventprocessorhost.abstract_event_processor import AbstractEventProcessor + from azure.eventprocessorhost.azure_storage_checkpoint_manager import AzureStorageCheckpointLeaseManager + from azure.eventprocessorhost.azure_blob_lease import AzureBlobLease + from azure.eventprocessorhost.checkpoint import Checkpoint + from azure.eventprocessorhost.eh_config import EventHubConfig + from azure.eventprocessorhost.eh_partition_pump import EventHubPartitionPump, PartitionReceiver + from azure.eventprocessorhost.eph import EventProcessorHost, EPHOptions + from azure.eventprocessorhost.partition_manager import PartitionManager + from azure.eventprocessorhost.partition_context import PartitionContext + from azure.eventprocessorhost.partition_pump import PartitionPump +except (SyntaxError, ImportError): + raise ImportError("EventProcessHost is only compatible with Python 3.5 and above.") diff --git a/azure-eventhubs/azure/eventprocessorhost/abstract_checkpoint_manager.py b/azure-eventhubs/azure/eventprocessorhost/abstract_checkpoint_manager.py new file mode 100644 index 000000000000..b4828596542a --- /dev/null +++ b/azure-eventhubs/azure/eventprocessorhost/abstract_checkpoint_manager.py @@ -0,0 +1,72 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# ----------------------------------------------------------------------------------- + +""" +Author: Aaron (Ari) Bornstien +""" +from abc import ABC, abstractmethod + +class AbstractCheckpointManager(ABC): + """ + If you wish to have EventProcessorHost store checkpoints somewhere other than Azure Storage, + you can write your own checkpoint manager using this abstract class. 
+ """ + def __init__(self): + pass + + @abstractmethod + async def create_checkpoint_store_if_not_exists_async(self): + """ + Create the checkpoint store if it doesn't exist. Do nothing if it does exist. + + :return: `True` if the checkpoint store already exists or was created OK, `False` + if there was a failure. + :rtype: bool + """ + + @abstractmethod + async def get_checkpoint_async(self, partition_id): + """ + Get the checkpoint data associated with the given partition. + Could return null if no checkpoint has been created for that partition. + + :param partition_id: The ID of a given parition. + :type partition_id: str + :return: Given partition checkpoint info, or `None` if none has been previously stored. + :rtype: ~azure.eventprocessorhost.checkpoint.Checkpoint + """ + + @abstractmethod + async def create_checkpoint_if_not_exists_async(self, partition_id): + """ + Create the given partition checkpoint if it doesn't exist.Do nothing if it does exist. + The offset/sequenceNumber for a freshly-created checkpoint should be set to StartOfStream/0. + + :param partition_id: The ID of a given parition. + :type partition_id: str + :return: The checkpoint for the given partition, whether newly created or already existing. + :rtype: ~azure.eventprocessorhost.checkpoint.Checkpoint + """ + + @abstractmethod + async def update_checkpoint_async(self, lease, checkpoint): + """ + Update the checkpoint in the store with the offset/sequenceNumber in the provided checkpoint. + + :param lease: The lease to be updated. + :type lease: ~azure.eventprocessorhost.lease.Lease + :param checkpoint: offset/sequeceNumber to update the store with. + :type checkpoint: ~azure.eventprocessorhost.checkpoint.Checkpoint + """ + + @abstractmethod + async def delete_checkpoint_async(self, partition_id): + """ + Delete the stored checkpoint for the given partition. If there is no stored checkpoint + for the given partition, that is treated as success. 
+ + :param partition_id: The ID of a given parition. + :type partition_id: str + """ diff --git a/azure-eventhubs/azure/eventprocessorhost/abstract_event_processor.py b/azure-eventhubs/azure/eventprocessorhost/abstract_event_processor.py new file mode 100644 index 000000000000..4fbd7fb20463 --- /dev/null +++ b/azure-eventhubs/azure/eventprocessorhost/abstract_event_processor.py @@ -0,0 +1,58 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# ----------------------------------------------------------------------------------- + +from abc import ABC, abstractmethod + + +class AbstractEventProcessor(ABC): + """ + Abstract that must be extended by event processor classes. + """ + def __init__(self, params=None): + pass + + @abstractmethod + async def open_async(self, context): + """ + Called by processor host to initialize the event processor. + + :param context: Information about the partition + :type context: ~azure.eventprocessorhost.partition_context.PartitionContext + """ + + @abstractmethod + async def close_async(self, context, reason): + """ + Called by processor host to indicate that the event processor is being stopped. + + :param context: Information about the partition + :type context: ~azure.eventprocessorhost.partition_context.PartitionContext + :param reason: The reason for closing. + :type reason: str + """ + + @abstractmethod + async def process_events_async(self, context, messages): + """ + Called by the processor host when a batch of events has arrived. + This is where the real work of the event processor is done. + + :param context: Information about the partition + :type context: ~azure.eventprocessorhost.partition_context.PartitionContext + :param messages: The events to be processed. 
+ :type messages: list[~azure.eventhub.common.EventData] + """ + + @abstractmethod + async def process_error_async(self, context, error): + """ + Called when the underlying client experiences an error while receiving. + EventProcessorHost will take care of recovering from the error and + continuing to pump messages. + + :param context: Information about the partition + :type context: ~azure.eventprocessorhost.partition_context.PartitionContext + :param error: The error that occured. + """ diff --git a/azure-eventhubs/azure/eventprocessorhost/abstract_lease_manager.py b/azure-eventhubs/azure/eventprocessorhost/abstract_lease_manager.py new file mode 100644 index 000000000000..1577a3b58969 --- /dev/null +++ b/azure-eventhubs/azure/eventprocessorhost/abstract_lease_manager.py @@ -0,0 +1,134 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# ----------------------------------------------------------------------------------- + +""" +Author: Aaron (Ari) Bornstien +""" +from abc import ABC, abstractmethod + +class AbstractLeaseManager(ABC): + """ + If you wish to have EventProcessorHost store leases somewhere other than Azure Storage, + you can write your own lease manager using this abstract class. The Azure Storage managers + use the same storage for both lease and checkpoints, so both interfaces are implemented by + the same class.You are free to do the same thing if you have a unified store for both + types of data. + """ + + def __init__(self, lease_renew_interval, lease_duration): + self.lease_renew_interval = lease_renew_interval + self.lease_duration = lease_duration + + @abstractmethod + async def create_lease_store_if_not_exists_async(self): + """ + Create the lease store if it does not exist, do nothing if it does exist. 
+ + :return: `True` if the lease store already exists or was created successfully, `False` if not. + :rtype: bool + """ + + @abstractmethod + async def delete_lease_store_async(self): + """ + Not used by EventProcessorHost, but a convenient function to have for testing. + + :return: `True` if the lease store was deleted successfully, `False` if not. + :rtype: bool + """ + + async def get_lease_async(self, partition_id): + """ + Return the lease info for the specified partition. + Can return null if no lease has been created in the store for the specified partition. + + :param partition_id: The ID of a given partition. + :type partition_id: str + :return: Lease info for the partition, or `None`. + :rtype: + """ + + @abstractmethod + def get_all_leases(self): + """ + Return the lease info for all partitions. + A typical implementation could just call get_lease_async() on all partitions. + + :return: A list of lease info. + :rtype: + """ + + @abstractmethod + async def create_lease_if_not_exists_async(self, partition_id): + """ + Create in the store the lease info for the given partition, if it does not exist. + Do nothing if it does exist in the store already. + + :param partition_id: The ID of a given partition. + :type partition_id: str + :return: The existing or newly-created lease info for the partition. + """ + + @abstractmethod + async def delete_lease_async(self, lease): + """ + Delete the lease info for the given partition from the store. + If there is no stored lease for the given partition, that is treated as success. + + :param lease: The lease to be deleted. + :type lease: ~azure.eventprocessorhost.lease.Lease + """ + + @abstractmethod + async def acquire_lease_async(self, lease): + """ + Acquire the lease on the desired partition for this EventProcessorHost. + Note that it is legal to acquire a lease that is already owned by another host. + Lease-stealing is how partitions are redistributed when additional hosts are started. 
+ + :param lease: The lease to be acquired. + :type lease: ~azure.eventprocessorhost.lease.Lease + :return: `True` if the lease was acquired successfully, `False` if not. + :rtype: bool + """ + + @abstractmethod + async def renew_lease_async(self, lease): + """ + Renew a lease currently held by this host. + If the lease has been stolen, or expired, or released, it is not possible to renew it. + You will have to call get_lease_async() and then acquire_lease_async() again. + + :param lease: The lease to be renewed. + :type lease: ~azure.eventprocessorhost.lease.Lease + :return: `True` if the lease was renewed successfully, `False` if not. + :rtype: bool + """ + + @abstractmethod + async def release_lease_async(self, lease): + """ + Give up a lease currently held by this host. If the lease has been stolen, or expired, + releasing it is unnecessary, and will fail if attempted. + + :param lease: The lease to be released. + :type lease: ~azure.eventprocessorhost.lease.Lease + :return: `True` if the lease was released successfully, `False` if not. + :rtype: bool + """ + + @abstractmethod + async def update_lease_async(self, lease): + """ + Update the store with the information in the provided lease. It is necessary to currently + hold a lease in order to update it. If the lease has been stolen, or expired, or released, + it cannot be updated. Updating should renew the lease before performing the update to + avoid lease expiration during the process. + + :param lease: The lease to be updated. + :type lease: ~azure.eventprocessorhost.lease.Lease + :return: `True` if the updated was performed successfully, `False` if not. 
+ :rtype: bool + """ diff --git a/azure-eventhubs/azure/eventprocessorhost/azure_blob_lease.py b/azure-eventhubs/azure/eventprocessorhost/azure_blob_lease.py new file mode 100644 index 000000000000..3ffb32961662 --- /dev/null +++ b/azure-eventhubs/azure/eventprocessorhost/azure_blob_lease.py @@ -0,0 +1,72 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# ----------------------------------------------------------------------------------- + +import asyncio +import json + +from azure.eventprocessorhost.lease import Lease + + +class AzureBlobLease(Lease): + """ + Azure Blob Lease + """ + + def __init__(self): + """ + Init Azure Blob Lease. + """ + super() + Lease.__init__(self) + self.offset = None + self.state = lambda: None + + def serializable(self): + """ + Returns Serializable instance of `__dict__`. + """ + serial = self.__dict__.copy() + del serial['state'] + return serial + + def with_lease(self, lease): + """ + Init with existing lease. + """ + super().with_source(lease) + + def with_blob(self, blob): + """ + Init Azure Blob Lease with existing blob. + """ + content = json.loads(blob.content) + self.partition_id = content["partition_id"] + self.owner = content["owner"] + self.token = content["token"] + self.epoch = content["epoch"] + self.offset = content["offset"] + self.sequence_number = content["sequence_number"] + self.event_processor_context = content.get("event_processor_context") + + def with_source(self, lease): + """ + Init Azure Blob Lease from existing. + """ + super().with_source(lease) + self.offset = lease.offset + self.sequence_number = lease.sequence_number + + async def is_expired(self): + """ + Check and return Azure Blob Lease state using Storage API. 
+ """ + if asyncio.iscoroutinefunction(self.state): + current_state = await self.state() + else: + current_state = self.state() + if current_state: + return current_state != "leased" + return False + \ No newline at end of file diff --git a/azure-eventhubs/azure/eventprocessorhost/azure_storage_checkpoint_manager.py b/azure-eventhubs/azure/eventprocessorhost/azure_storage_checkpoint_manager.py new file mode 100644 index 000000000000..59df582efc87 --- /dev/null +++ b/azure-eventhubs/azure/eventprocessorhost/azure_storage_checkpoint_manager.py @@ -0,0 +1,476 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# ----------------------------------------------------------------------------------- + +import re +import json +import uuid +import logging +import concurrent.futures +import functools +import asyncio +import requests + +from azure.storage.blob import BlockBlobService +from azure.eventprocessorhost.azure_blob_lease import AzureBlobLease +from azure.eventprocessorhost.checkpoint import Checkpoint +from azure.eventprocessorhost.abstract_lease_manager import AbstractLeaseManager +from azure.eventprocessorhost.abstract_checkpoint_manager import AbstractCheckpointManager + + +_logger = logging.getLogger(__name__) + + +class AzureStorageCheckpointLeaseManager(AbstractCheckpointManager, AbstractLeaseManager): + """ + Manages checkpoints and lease with azure storage blobs. In this implementation, + checkpoints are data that's actually in the lease blob, so checkpoint operations + turn into lease operations under the covers. + + :param str storage_account_name: The storage account name. This is used to + authenticate requests signed with an account key and to construct the storage + endpoint. It is required unless a connection string is given. 
+ :param str storage_account_key: The storage account key. This is used for shared key + authentication. If neither account key or sas token is specified, anonymous access + will be used. + :param str lease_container_name: The name of the container that will be used to store + leases. If it does not already exist it will be created. Default value is 'eph-leases'. + :param int lease_renew_interval: The interval in seconds at which EPH will attempt to + renew the lease of a particular partition. Default value is 10. + :param int lease_duration: The duration in seconds of a lease on a partition. + Default value is 30. + :param str sas_token: A shared access signature token to use to authenticate requests + instead of the account key. If account key and sas token are both specified, + account key will be used to sign. If neither are specified, anonymous access will be used. + :param str endpoint_suffix: The host base component of the url, minus the account name. + Defaults to Azure (core.windows.net). Override this to use a National Cloud. + :param str connection_string: If specified, this will override all other endpoint parameters. + See http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ + for the connection string format. 
+ """ + + def __init__(self, storage_account_name=None, storage_account_key=None, lease_container_name="eph-leases", + storage_blob_prefix=None, lease_renew_interval=10, lease_duration=30, + sas_token=None, endpoint_suffix="core.windows.net", connection_string=None): + AbstractCheckpointManager.__init__(self) + AbstractLeaseManager.__init__(self, lease_renew_interval, lease_duration) + self.storage_account_name = storage_account_name + self.storage_account_key = storage_account_key + self.storage_sas_token = sas_token + self.endpoint_suffix = endpoint_suffix + self.connection_string = connection_string + self.lease_container_name = lease_container_name + self.storage_blob_prefix = storage_blob_prefix + self.storage_client = None + self.consumer_group_directory = None + self.host = None + self.storage_max_execution_time = 120 + self.request_session = requests.Session() + self.request_session.mount('https://', requests.adapters.HTTPAdapter(pool_connections=100, pool_maxsize=100)) + self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=32) + + # Validate storage inputs + if not self.storage_account_name and not self.connection_string: + raise ValueError("Need a valid storage account name or connection string.") + if not re.compile(r"^[a-z0-9](([a-z0-9\-[^\-])){1,61}[a-z0-9]$").match(self.lease_container_name): + raise ValueError("Azure Storage lease container name is invalid.\ + Please check naming conventions at\ + https:# msdn.microsoft.com/en-us/library/azure/dd135715.aspx") + + if self.storage_blob_prefix: + self.storage_blob_prefix.replace(" ", "") # Convert all-whitespace to empty string. + else: + self.storage_blob_prefix = "" # Convert null prefix to empty string. + + def initialize(self, host): + """ + The EventProcessorHost can't pass itself to the AzureStorageCheckpointLeaseManager + constructor because it is still being constructed. Do other initialization here + also because it might throw and hence we don't want it in the constructor. 
+ """ + self.host = host + self.storage_client = BlockBlobService(account_name=self.storage_account_name, + account_key=self.storage_account_key, + sas_token=self.storage_sas_token, + endpoint_suffix=self.endpoint_suffix, + connection_string=self.connection_string, + request_session=self.request_session) + self.consumer_group_directory = self.storage_blob_prefix + self.host.eh_config.consumer_group + + # Checkpoint Managment Methods + + async def create_checkpoint_store_if_not_exists_async(self): + """ + Create the checkpoint store if it doesn't exist. Do nothing if it does exist. + + :return: `True` if the checkpoint store already exists or was created OK, `False` + if there was a failure. + :rtype: bool + """ + await self.create_lease_store_if_not_exists_async() + + async def get_checkpoint_async(self, partition_id): + """ + Get the checkpoint data associated with the given partition. + Could return null if no checkpoint has been created for that partition. + + :param partition_id: The partition ID. + :type partition_id: str + :return: Given partition checkpoint info, or `None` if none has been previously stored. + :rtype: ~azure.eventprocessorhost.checkpoint.Checkpoint + """ + lease = await self.get_lease_async(partition_id) + checkpoint = None + if lease: + if lease.offset: + checkpoint = Checkpoint(partition_id, lease.offset, + lease.sequence_number) + return checkpoint + + async def create_checkpoint_if_not_exists_async(self, partition_id): + """ + Create the given partition checkpoint if it doesn't exist.Do nothing if it does exist. + The offset/sequenceNumber for a freshly-created checkpoint should be set to StartOfStream/0. + + :param partition_id: The partition ID. + :type partition_id: str + :return: The checkpoint for the given partition, whether newly created or already existing. 
+ :rtype: ~azure.eventprocessorhost.checkpoint.Checkpoint + """ + checkpoint = await self.get_checkpoint_async(partition_id) + if not checkpoint: + await self.create_lease_if_not_exists_async(partition_id) + checkpoint = Checkpoint(partition_id) + return checkpoint + + async def update_checkpoint_async(self, lease, checkpoint): + """ + Update the checkpoint in the store with the offset/sequenceNumber in the provided checkpoint + checkpoint:offset/sequeceNumber to update the store with. + + :param lease: The stored lease to be updated. + :type lease: ~azure.eventprocessorhost.lease.Lease + :param checkpoint: The checkpoint to update the lease with. + :type checkpoint: ~azure.eventprocessorhost.checkpoint.Checkpoint + """ + new_lease = AzureBlobLease() + new_lease.with_source(lease) + new_lease.offset = checkpoint.offset + new_lease.sequence_number = checkpoint.sequence_number + return await self.update_lease_async(new_lease) + + async def delete_checkpoint_async(self, partition_id): + """ + Delete the stored checkpoint for the given partition. If there is no stored checkpoint + for the given partition, that is treated as success. + + :param partition_id: The partition ID. + :type partition_id: str + """ + return # Make this a no-op to avoid deleting leases by accident. + + # Lease Managment Methods + + async def create_lease_store_if_not_exists_async(self): + """ + Create the lease store if it does not exist, do nothing if it does exist. + + :return: `True` if the lease store already exists or was created successfully, `False` if not. + :rtype: bool + """ + try: + await self.host.loop.run_in_executor( + self.executor, + functools.partial( + self.storage_client.create_container, + self.lease_container_name)) + + except Exception as err: # pylint: disable=broad-except + _logger.error("%r", err) + raise err + + return True + + async def delete_lease_store_async(self): + """ + Not used by EventProcessorHost, but a convenient function to have for testing. 
+ + :return: `True` if the lease store was deleted successfully, `False` if not. + :rtype: bool + """ + return "Not Supported in Python" + + async def get_lease_async(self, partition_id): + """ + Return the lease info for the specified partition. + Can return null if no lease has been created in the store for the specified partition. + + :param partition_id: The partition ID. + :type partition_id: str + :return: lease info for the partition, or `None`. + :rtype: ~azure.eventprocessorhost.lease.Lease + """ + try: + blob = await self.host.loop.run_in_executor( + self.executor, + functools.partial( + self.storage_client.get_blob_to_text, + self.lease_container_name, partition_id)) + lease = AzureBlobLease() + lease.with_blob(blob) + async def state(): + """ + Allow lease to curry storage_client to get state + """ + try: + loop = asyncio.get_event_loop() + res = await loop.run_in_executor( + self.executor, + functools.partial( + self.storage_client.get_blob_properties, + self.lease_container_name, + partition_id)) + return res.properties.lease.state + except Exception as err: # pylint: disable=broad-except + _logger.error("Failed to get lease state %r %r", err, partition_id) + + lease.state = state + return lease + except Exception as err: # pylint: disable=broad-except + _logger.error("Failed to get lease %r %r", err, partition_id) + + async def get_all_leases(self): + """ + Return the lease info for all partitions. + A typical implementation could just call get_lease_async() on all partitions. + + :return: A list of lease info. + :rtype: list[~azure.eventprocessorhost.lease.Lease] + """ + lease_futures = [] + partition_ids = await self.host.partition_manager.get_partition_ids_async() + for partition_id in partition_ids: + lease_futures.append(self.get_lease_async(partition_id)) + return lease_futures + + async def create_lease_if_not_exists_async(self, partition_id): + """ + Create in the store the lease info for the given partition, if it does not exist. 
+ Do nothing if it does exist in the store already. + + :param partition_id: The ID of a given parition. + :type partition_id: str + :return: the existing or newly-created lease info for the partition. + :rtype: ~azure.eventprocessorhost.lease.Lease + """ + return_lease = None + try: + return_lease = AzureBlobLease() + return_lease.partition_id = partition_id + serializable_lease = return_lease.serializable() + json_lease = json.dumps(serializable_lease) + _logger.info("Creating Lease %r %r %r", + self.lease_container_name, + partition_id, + json.dumps({k:v for k, v in serializable_lease.items() if k != 'event_processor_context'})) + await self.host.loop.run_in_executor( + self.executor, + functools.partial( + self.storage_client.create_blob_from_text, + self.lease_container_name, + partition_id, + json_lease)) + except Exception: # pylint: disable=broad-except + try: + return_lease = await self.get_lease_async(partition_id) + except Exception as err: # pylint: disable=broad-except + _logger.error("Failed to create lease %r", err) + raise err + return return_lease + + async def delete_lease_async(self, lease): + """ + Delete the lease info for the given partition from the store. + If there is no stored lease for the given partition, that is treated as success. + + :param lease: The stored lease to be deleted. + :type lease: ~azure.eventprocessorhost.lease.Lease + """ + await self.host.loop.run_in_executor( + self.executor, + functools.partial( + self.storage_client.delete_blob, + self.lease_container_name, + lease.partition_id, + lease_id=lease.token)) + + async def acquire_lease_async(self, lease): + """ + Acquire the lease on the desired partition for this EventProcessorHost. + Note that it is legal to acquire a lease that is already owned by another host. + Lease-stealing is how partitions are redistributed when additional hosts are started. + + :param lease: The stored lease to be acquired. 
+ :type lease: ~azure.eventprocessorhost.lease.Lease + :return: `True` if the lease was acquired successfully, `False` if not. + :rtype: bool + """ + retval = True + new_lease_id = str(uuid.uuid4()) + partition_id = lease.partition_id + try: + if asyncio.iscoroutinefunction(lease.state): + state = await lease.state() + else: + state = lease.state() + if state == "leased": + if not lease.token: + # We reach here in a race condition: when this instance of EventProcessorHost + # scanned the lease blobs, this partition was unowned (token is empty) but + # between then and now, another instance of EPH has established a lease + # (getLeaseState() is LEASED). We normally enforcethat we only steal the lease + # if it is still owned by the instance which owned it when we scanned, but we + # can't do that when we don't know who owns it. The safest thing to do is just + # fail the acquisition. If that means that one EPH instance gets more partitions + # than it should, rebalancing will take care of that quickly enough. 
+ retval = False + else: + _logger.info("ChangingLease %r %r", self.host.guid, lease.partition_id) + await self.host.loop.run_in_executor( + self.executor, + functools.partial( + self.storage_client.change_blob_lease, + self.lease_container_name, + partition_id, + lease.token, + new_lease_id)) + lease.token = new_lease_id + else: + _logger.info("AcquiringLease %r %r", self.host.guid, lease.partition_id) + lease.token = await self.host.loop.run_in_executor( + self.executor, + functools.partial( + self.storage_client.acquire_blob_lease, + self.lease_container_name, + partition_id, + self.lease_duration, + new_lease_id)) + lease.owner = self.host.host_name + lease.increment_epoch() + # check if this solves the issue + retval = await self.update_lease_async(lease) + except Exception as err: # pylint: disable=broad-except + _logger.error("Failed to acquire lease %r %r %r", err, partition_id, lease.token) + return False + + return retval + + async def renew_lease_async(self, lease): + """ + Renew a lease currently held by this host. + If the lease has been stolen, or expired, or released, it is not possible to renew it. + You will have to call getLease() and then acquireLease() again. + + :param lease: The stored lease to be renewed. + :type lease: ~azure.eventprocessorhost.lease.Lease + :return: `True` if the lease was renewed successfully, `False` if not. 
+ :rtype: bool + """ + try: + await self.host.loop.run_in_executor( + self.executor, + functools.partial( + self.storage_client.renew_blob_lease, + self.lease_container_name, + lease.partition_id, + lease_id=lease.token, + timeout=self.lease_duration)) + except Exception as err: # pylint: disable=broad-except + if "LeaseIdMismatchWithLeaseOperation" in str(err): + _logger.info("LeaseLost on partition %r", lease.partition_id) + else: + _logger.error("Failed to renew lease on partition %r with token %r %r", + lease.partition_id, lease.token, err) + return False + return True + + async def release_lease_async(self, lease): + """ + Give up a lease currently held by this host. If the lease has been stolen, or expired, + releasing it is unnecessary, and will fail if attempted. + + :param lease: The stored lease to be released. + :type lease: ~azure.eventprocessorhost.lease.Lease + :return: `True` if the lease was released successfully, `False` if not. + :rtype: bool + """ + lease_id = None + try: + _logger.info("Releasing lease %r %r", self.host.guid, lease.partition_id) + lease_id = lease.token + released_copy = AzureBlobLease() + released_copy.with_lease(lease) + released_copy.token = None + released_copy.owner = None + released_copy.state = None + await self.host.loop.run_in_executor( + self.executor, + functools.partial( + self.storage_client.create_blob_from_text, + self.lease_container_name, + lease.partition_id, + json.dumps(released_copy.serializable()), + lease_id=lease_id)) + await self.host.loop.run_in_executor( + self.executor, + functools.partial( + self.storage_client.release_blob_lease, + self.lease_container_name, + lease.partition_id, + lease_id)) + except Exception as err: # pylint: disable=broad-except + _logger.error("Failed to release lease %r %r %r", + err, lease.partition_id, lease_id) + return False + return True + + async def update_lease_async(self, lease): + """ + Update the store with the information in the provided lease. 
It is necessary to currently + hold a lease in order to update it. If the lease has been stolen, or expired, or released, + it cannot be updated. Updating should renew the lease before performing the update to + avoid lease expiration during the process. + + :param lease: The stored lease to be updated. + :type lease: ~azure.eventprocessorhost.lease.Lease + :return: `True` if the updated was performed successfully, `False` if not. + :rtype: bool + """ + if lease is None: + return False + + if not lease.token: + return False + + _logger.debug("Updating lease %r %r", self.host.guid, lease.partition_id) + + # First, renew the lease to make sure the update will go through. + if await self.renew_lease_async(lease): + try: + await self.host.loop.run_in_executor( + self.executor, + functools.partial( + self.storage_client.create_blob_from_text, + self.lease_container_name, + lease.partition_id, + json.dumps(lease.serializable()), + lease_id=lease.token)) + + except Exception as err: # pylint: disable=broad-except + _logger.error("Failed to update lease %r %r %r", + self.host.guid, lease.partition_id, err) + raise err + else: + return False + return True diff --git a/azure-eventhubs/azure/eventprocessorhost/cancellation_token.py b/azure-eventhubs/azure/eventprocessorhost/cancellation_token.py new file mode 100644 index 000000000000..ae1aeaebdffc --- /dev/null +++ b/azure-eventhubs/azure/eventprocessorhost/cancellation_token.py @@ -0,0 +1,20 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -----------------------------------------------------------------------------------
+
+"""
+Based on https://stackoverflow.com/questions/43229939/how-to-pass-a-boolean-by-reference-across-threads-and-modules
+"""
+class CancellationToken:
+    """
+    Thread Safe Mutable Cancellation Token.
+    """
+    def __init__(self):
+        self.is_cancelled = False
+
+    def cancel(self):
+        """
+        Cancel the token.
+        """
+        self.is_cancelled = True
diff --git a/azure-eventhubs/azure/eventprocessorhost/checkpoint.py b/azure-eventhubs/azure/eventprocessorhost/checkpoint.py
new file mode 100644
index 000000000000..ff09052336f0
--- /dev/null
+++ b/azure-eventhubs/azure/eventprocessorhost/checkpoint.py
@@ -0,0 +1,34 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# -----------------------------------------------------------------------------------
+
+class Checkpoint:
+    """
+    Contains checkpoint metadata.
+    """
+
+    def __init__(self, partition_id, offset="-1", sequence_number="0"):
+        """Initialize Checkpoint.
+
+        :param partition_id: The partition ID of the checkpoint.
+        :type partition_id: str
+        :param offset: The receive offset of the checkpoint.
+        :type offset: str
+        :param sequence_number: The sequence number of the checkpoint.
+        :type sequence_number: str
+        """
+        self.partition_id = partition_id
+        self.offset = offset
+        self.sequence_number = sequence_number
+
+    def from_source(self, checkpoint):
+        """
+        Creates a new Checkpoint from an existing checkpoint.
+
+        :param checkpoint: Existing checkpoint.
+ :type checkpoint: ~azure.eventprocessorhost.checkpoint.Checkpoint + """ + self.partition_id = checkpoint.partition_id + self.offset = checkpoint.offset + self.sequence_number = checkpoint.sequence_number diff --git a/azure-eventhubs/azure/eventprocessorhost/eh_config.py b/azure-eventhubs/azure/eventprocessorhost/eh_config.py new file mode 100644 index 000000000000..73f89a8306e8 --- /dev/null +++ b/azure-eventhubs/azure/eventprocessorhost/eh_config.py @@ -0,0 +1,71 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# ----------------------------------------------------------------------------------- + +import time +import urllib +import hmac +import hashlib +import base64 + +class EventHubConfig: + """ + A container class for Event Hub properties. + + :param sb_name: The EventHub (ServiceBus) namespace. + :type sb_name: str + :param eh_name: The EventHub name. + :type eh_name: str + :param policy: The SAS policy name. + :type policy: str + :param sas_key: The SAS access key. + :type sas_key: str + :param consumer_group: The EventHub consumer group to receive from. The + default value is '$default'. + :type consumer_group: str + :param namespace_suffix: The ServiceBus namespace URL suffix. + The default value is 'servicebus.windows.net'. 
+ :type namespace_suffix: str + """ + def __init__(self, sb_name, eh_name, policy, sas_key, + consumer_group="$default", + namespace_suffix="servicebus.windows.net"): + self.sb_name = sb_name + self.eh_name = eh_name + self.policy = policy + self.sas_key = sas_key + self.namespace_suffix = namespace_suffix + self.consumer_group = consumer_group + self.client_address = self.get_client_address() + self.rest_token = self.get_rest_token() + + def get_client_address(self): + """ + Returns an auth token dictionary for making calls to eventhub + REST API. + + :rtype: str + """ + return "amqps://{}:{}@{}.{}:5671/{}".format( + urllib.parse.quote_plus(self.policy), + urllib.parse.quote_plus(self.sas_key), + self.sb_name, + self.namespace_suffix, + self.eh_name) + + def get_rest_token(self): + """ + Returns an auth token for making calls to eventhub REST API. + + :rtype: str + """ + uri = urllib.parse.quote_plus( + "https://{}.{}/{}".format(self.sb_name, self.namespace_suffix, self.eh_name)) + sas = self.sas_key.encode('utf-8') + expiry = str(int(time.time() + 10000)) + string_to_sign = ('{}\n{}'.format(uri, expiry)).encode('utf-8') + signed_hmac_sha256 = hmac.HMAC(sas, string_to_sign, hashlib.sha256) + signature = urllib.parse.quote(base64.b64encode(signed_hmac_sha256.digest())) + return 'SharedAccessSignature sr={}&sig={}&se={}&skn={}' \ + .format(uri, signature, expiry, self.policy) diff --git a/azure-eventhubs/azure/eventprocessorhost/eh_partition_pump.py b/azure-eventhubs/azure/eventprocessorhost/eh_partition_pump.py new file mode 100644 index 000000000000..e0aa25dc2e8d --- /dev/null +++ b/azure-eventhubs/azure/eventprocessorhost/eh_partition_pump.py @@ -0,0 +1,168 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# ----------------------------------------------------------------------------------- + +import logging +import asyncio +from azure.eventhub import Offset, EventHubClientAsync +from azure.eventprocessorhost.partition_pump import PartitionPump + + +_logger = logging.getLogger(__name__) + + +class EventHubPartitionPump(PartitionPump): + """ + Pulls and messages from lease partition from eventhub and sends them to processor. + """ + + def __init__(self, host, lease): + PartitionPump.__init__(self, host, lease) + self.eh_client = None + self.partition_receiver = None + self.partition_receive_handler = None + self.running = None + + async def on_open_async(self): + """ + Eventhub Override for on_open_async. + """ + _opened_ok = False + _retry_count = 0 + while (not _opened_ok) and (_retry_count < 5): + try: + await self.open_clients_async() + _opened_ok = True + except Exception as err: # pylint: disable=broad-except + _logger.warning( + "%r,%r PartitionPumpWarning: Failure creating client or receiver, retrying: %r", + self.host.guid, self.partition_context.partition_id, err) + last_exception = err + _retry_count += 1 + + if not _opened_ok: + await self.processor.process_error_async(self.partition_context, last_exception) + self.set_pump_status("OpenFailed") + + if self.pump_status == "Opening": + loop = asyncio.get_event_loop() + self.set_pump_status("Running") + await self.eh_client.run_async() + self.running = loop.create_task(self.partition_receiver.run()) + + if self.pump_status in ["OpenFailed", "Errored"]: + self.set_pump_status("Closing") + await self.clean_up_clients_async() + self.set_pump_status("Closed") + + + async def open_clients_async(self): + """ + Responsible for establishing connection to event hub client + throws EventHubsException, IOException, InterruptedException, ExecutionException. 
+ """ + await self.partition_context.get_initial_offset_async() + # Create event hub client and receive handler and set options + self.eh_client = EventHubClientAsync( + self.host.eh_config.client_address, + debug=self.host.eph_options.debug_trace, + http_proxy=self.host.eph_options.http_proxy) + self.partition_receive_handler = self.eh_client.add_async_receiver( + self.partition_context.consumer_group_name, + self.partition_context.partition_id, + Offset(self.partition_context.offset), + prefetch=self.host.eph_options.prefetch_count, + keep_alive=self.host.eph_options.keep_alive_interval, + auto_reconnect=self.host.eph_options.auto_reconnect_on_error, + loop=self.loop) + self.partition_receiver = PartitionReceiver(self) + + async def clean_up_clients_async(self): + """ + Resets the pump swallows all exceptions. + """ + if self.partition_receiver: + if self.eh_client: + await self.eh_client.stop_async() + self.partition_receiver = None + self.partition_receive_handler = None + self.eh_client = None + + async def on_closing_async(self, reason): + """ + Overides partition pump on closing. + + :param reason: The reason for the shutdown. + :type reason: str + """ + self.partition_receiver.eh_partition_pump.set_pump_status("Errored") + try: + await self.running + except TypeError: + _logger.debug("No partition pump running.") + except Exception as err: # pylint: disable=broad-except + _logger.info("Error on closing partition pump: %r", err) + await self.clean_up_clients_async() + + +class PartitionReceiver: + """ + Recieves events asynchronously until lease is lost. + """ + + def __init__(self, eh_partition_pump): + self.eh_partition_pump = eh_partition_pump + self.max_batch_size = self.eh_partition_pump.host.eph_options.max_batch_size + self.recieve_timeout = self.eh_partition_pump.host.eph_options.receive_timeout + + async def run(self): + """ + Runs the async partion reciever event loop to retrive messages from the event queue. 
+ """ + # Implement pull max batch from queue instead of one message at a time + while self.eh_partition_pump.pump_status != "Errored" and not self.eh_partition_pump.is_closing(): + if self.eh_partition_pump.partition_receive_handler: + try: + msgs = await self.eh_partition_pump.partition_receive_handler.receive( + max_batch_size=self.max_batch_size, + timeout=self.recieve_timeout) + except Exception as e: # pylint: disable=broad-except + _logger.info("Error raised while attempting to receive messages: %r", e) + await self.process_error_async(e) + else: + if not msgs: + _logger.info("No events received, queue size %r, release %r", + self.eh_partition_pump.partition_receive_handler.queue_size, + self.eh_partition_pump.host.eph_options.release_pump_on_timeout) + if self.eh_partition_pump.host.eph_options.release_pump_on_timeout: + await self.process_error_async(TimeoutError("No events received")) + else: + await self.process_events_async(msgs) + + async def process_events_async(self, events): + """ + This method is called on the thread that the EH client uses to run the pump. + There is one pump per EventHubClient. Since each PartitionPump creates a + new EventHubClient, using that thread to call OnEvents does no harm. Even if OnEvents + is slow, the pump will get control back each time OnEvents returns, and be able to receive + a new batch of messages with which to make the next OnEvents call.The pump gains nothing + by running faster than OnEvents. + + :param events: List of events to be processed. + :type events: list of ~azure.eventhub.common.EventData + """ + await self.eh_partition_pump.process_events_async(events) + + async def process_error_async(self, error): + """ + Handles processing errors this is never called since python recieve client doesn't + have error handling implemented (TBD add fault pump handling). + + :param error: An error the occurred. 
+        :type error: Exception
+        """
+        try:
+            await self.eh_partition_pump.process_error_async(error)
+        finally:
+            self.eh_partition_pump.set_pump_status("Errored")
diff --git a/azure-eventhubs/azure/eventprocessorhost/eph.py b/azure-eventhubs/azure/eventprocessorhost/eph.py
new file mode 100644
index 000000000000..2e464e10235c
--- /dev/null
+++ b/azure-eventhubs/azure/eventprocessorhost/eph.py
@@ -0,0 +1,110 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# -----------------------------------------------------------------------------------
+
+import uuid
+import asyncio
+from azure.eventprocessorhost.partition_manager import PartitionManager
+
+
+class EventProcessorHost:
+    """
+    Represents a host for processing Event Hubs event data at scale.
+    Takes in an event hub, an event processor class definition, a config object,
+    as well as a storage manager and optional event processor params (ep_params).
+    """
+
+    def __init__(self, event_processor, eh_config, storage_manager, ep_params=None, eph_options=None, loop=None):
+        """
+        Initialize EventProcessorHost.
+
+        :param event_processor: The event processing handler.
+        :type event_processor: ~azure.eventprocessorhost.abstract_event_processor.AbstractEventProcessor
+        :param eh_config: The EPH connection configuration.
+        :type eh_config: ~azure.eventprocessorhost.eh_config.EventHubConfig
+        :param storage_manager: The Azure storage manager for persisting lease and
+         checkpoint information.
+        :type storage_manager:
+         ~azure.eventprocessorhost.azure_storage_checkpoint_manager.AzureStorageCheckpointLeaseManager
+        :param ep_params: Optional arbitrary parameters to be passed into the event_processor
+         on initialization.
+        :type ep_params: list
+        :param eph_options: EPH configuration options.
+ :type eph_options: ~azure.eventprocessorhost.eph.EPHOptions + :param loop: An eventloop. If not provided the default asyncio event loop will be used. + """ + self.event_processor = event_processor + self.event_processor_params = ep_params + self.eh_config = eh_config + self.guid = str(uuid.uuid4()) + self.host_name = "host" + str(self.guid) + self.loop = loop or asyncio.get_event_loop() + self.eph_options = eph_options or EPHOptions() + self.partition_manager = PartitionManager(self) + self.storage_manager = storage_manager + if self.storage_manager: + self.storage_manager.initialize(self) + + async def open_async(self): + """ + Starts the host. + """ + if not self.loop: + self.loop = asyncio.get_event_loop() + await self.partition_manager.start_async() + + async def close_async(self): + """ + Stops the host. + """ + await self.partition_manager.stop_async() + + +class EPHOptions: + """ + Class that contains default and overidable EPH option. + + :ivar max_batch_size: The maximum number of events retrieved for processing + at a time. This value must be less than or equal to the prefetch count. The actual + number of events returned for processing may be any number up to the maximum. + The default value is 10. + :vartype max_batch_size: int + :ivar prefetch_count: The number of events to fetch from the service in advance of + processing. The default value is 300. + :vartype prefetch_count: int + :ivar receive_timeout: The length of time a single partition receiver will wait in + order to receive a batch of events. Default is 60 seconds. + :vartype receive_timeout: int + :ivar release_pump_on_timeout: Whether to shutdown an individual partition receiver if + no events were received in the specified timeout. Shutting down the pump will release + the lease to allow it to be picked up by another host. Default is False. + :vartype release_pump_on_timeout: bool + :ivar initial_offset_provider: The initial event offset to receive from if no persisted + offset is found. 
Default is "-1" (i.e. from the first event available). + :vartype initial_offset_provider: str + :ivar debug_trace: Whether to emit the network traffic in the logs. In order to view + these events the logger must be configured to track "uamqp". Default is False. + :vartype debug_trace: bool + :ivar http_proxy: HTTP proxy configuration. This should be a dictionary with + the following keys present: 'proxy_hostname' and 'proxy_port'. Additional optional + keys are 'username' and 'password'. + :vartype http_proxy: dict + :ivar keep_alive_interval: The time in seconds between asynchronously pinging a receiver + connection to keep it alive during inactivity. Default is None - i.e. no connection pinging. + :vartype keep_alive_interval: int + :ivar auto_reconnect_on_error: Whether to automatically attempt to reconnect a receiver + connection if it is detach from the service with a retryable error. Default is True. + :vartype auto_reconnect_on_error: bool + """ + + def __init__(self): + self.max_batch_size = 10 + self.prefetch_count = 300 + self.receive_timeout = 60 + self.release_pump_on_timeout = False + self.initial_offset_provider = "-1" + self.debug_trace = False + self.http_proxy = None + self.keep_alive_interval = None + self.auto_reconnect_on_error = True diff --git a/azure-eventhubs/azure/eventprocessorhost/lease.py b/azure-eventhubs/azure/eventprocessorhost/lease.py new file mode 100644 index 000000000000..02e863e2c5e5 --- /dev/null +++ b/azure-eventhubs/azure/eventprocessorhost/lease.py @@ -0,0 +1,60 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# ----------------------------------------------------------------------------------- + + +class Lease: + """ + Lease contains partition processing state metadata used to manage partition state. 
+ """ + + def __init__(self): + self.partition_id = None + self.sequence_number = None + self.owner = None + self.token = None + self.epoch = 0 + self.event_processor_context = None + + def with_partition_id(self, partition_id): + """ + Init with partition Id. + + :param partition_id: ID of a given partition. + :type partition_id: str + """ + self.partition_id = partition_id + self.owner = None + self.token = None + self.epoch = 0 + self.event_processor_context = None + + def with_source(self, lease): + """ + Init with existing lease. + + :param lease: An existing Lease. + :type lease: ~azure.eventprocessorhost.lease.Lease + """ + self.partition_id = lease.partition_id + self.epoch = lease.epoch + self.owner = lease.owner + self.token = lease.token + self.event_processor_context = lease.event_processor_context + + async def is_expired(self): + """ + Determines whether the lease is expired. By default lease never expires. + Deriving class implements the lease expiry logic. + + :rtype: bool + """ + return False + + def increment_epoch(self): + """ + Increment lease epoch. + """ + self.epoch += 1 + return self.epoch diff --git a/azure-eventhubs/azure/eventprocessorhost/partition_context.py b/azure-eventhubs/azure/eventprocessorhost/partition_context.py new file mode 100644 index 000000000000..947645f6f780 --- /dev/null +++ b/azure-eventhubs/azure/eventprocessorhost/partition_context.py @@ -0,0 +1,155 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# ----------------------------------------------------------------------------------- +import asyncio +import logging +from azure.eventprocessorhost.checkpoint import Checkpoint + + +_logger = logging.getLogger(__name__) + + +class PartitionContext: + """ + Encapsulates information related to an Event Hubs partition used by AbstractEventProcessor. + """ + + def __init__(self, host, partition_id, eh_path, consumer_group_name, pump_loop=None): + self.host = host + self.partition_id = partition_id + self.eh_path = eh_path + self.consumer_group_name = consumer_group_name + self.offset = "-1" + self.sequence_number = 0 + self.lease = None + self.event_processor_context = None + self.pump_loop = pump_loop or asyncio.get_event_loop() + + def set_offset_and_sequence_number(self, event_data): + """ + Updates offset based on event. + + :param event_data: A received EventData with valid offset and sequenceNumber. + :type event_data: ~azure.eventhub.common.EventData + """ + if not event_data: + raise Exception(event_data) + self.offset = event_data.offset.value + self.sequence_number = event_data.sequence_number + + async def get_initial_offset_async(self): # throws InterruptedException, ExecutionException + """ + Gets the initial offset for processing the partition. + + :rtype: str + """ + _logger.info("Calling user-provided initial offset provider %r %r", + self.host.guid, self.partition_id) + starting_checkpoint = await self.host.storage_manager.get_checkpoint_async(self.partition_id) + if not starting_checkpoint: + # No checkpoint was ever stored. 
Use the initialOffsetProvider instead + # defaults to "-1" + self.offset = self.host.eph_options.initial_offset_provider + self.sequence_number = -1 + else: + self.offset = starting_checkpoint.offset + self.sequence_number = starting_checkpoint.sequence_number + + _logger.info("%r %r Initial offset/sequenceNumber provided %r/%r", + self.host.guid, self.partition_id, self.offset, self.sequence_number) + return self.offset + + async def checkpoint_async(self, event_processor_context=None): + """ + Generates a checkpoint for the partition using the curren offset and sequenceNumber for + and persists to the checkpoint manager. + + :param event_processor_context An optional custom state value for the Event Processor. + This data must be in a JSON serializable format. + :type event_processor_context: str or dict + """ + captured_checkpoint = Checkpoint(self.partition_id, self.offset, self.sequence_number) + await self.persist_checkpoint_async(captured_checkpoint, event_processor_context) + self.event_processor_context = event_processor_context + + async def checkpoint_async_event_data(self, event_data, event_processor_context=None): + """ + Stores the offset and sequenceNumber from the provided received EventData instance, + then writes those values to the checkpoint store via the checkpoint manager. + Optionally stores the state of the Event Processor along the checkpoint. + + :param event_data: A received EventData with valid offset and sequenceNumber. + :type event_data: ~azure.eventhub.common.EventData + :param event_processor_context An optional custom state value for the Event Processor. + This data must be in a JSON serializable format. + :type event_processor_context: str or dict + :raises: ValueError if suplied event_data is None. + :raises: ValueError if the sequenceNumber is less than the last checkpointed value. 
+ """ + if not event_data: + raise ValueError("event_data") + if event_data.sequence_number > self.sequence_number: + #We have never seen this sequence number yet + raise ValueError("Argument Out Of Range event_data x-opt-sequence-number") + + await self.persist_checkpoint_async(Checkpoint(self.partition_id, + event_data.offset.value, + event_data.sequence_number), + event_processor_context) + self.event_processor_context = event_processor_context + + def to_string(self): + """ + Returns the parition context in the following format: + "PartitionContext({EventHubPath}{ConsumerGroupName}{PartitionId}{SequenceNumber})" + + :rtype: str + """ + return "PartitionContext({}{}{}{})".format(self.eh_path, + self.consumer_group_name, + self.partition_id, + self.sequence_number) + + async def persist_checkpoint_async(self, checkpoint, event_processor_context=None): + """ + Persists the checkpoint, and - optionally - the state of the Event Processor. + + :param checkpoint: The checkpoint to persist. + :type checkpoint: ~azure.eventprocessorhost.checkpoint.Checkpoint + :param event_processor_context An optional custom state value for the Event Processor. + This data must be in a JSON serializable format. 
+ :type event_processor_context: str or dict + """ + _logger.debug("PartitionPumpCheckpointStart %r %r %r %r", + self.host.guid, checkpoint.partition_id, checkpoint.offset, checkpoint.sequence_number) + try: + in_store_checkpoint = await self.host.storage_manager.get_checkpoint_async(checkpoint.partition_id) + if not in_store_checkpoint or checkpoint.sequence_number >= in_store_checkpoint.sequence_number: + if not in_store_checkpoint: + _logger.info("persisting checkpoint %r", checkpoint.__dict__) + await self.host.storage_manager.create_checkpoint_if_not_exists_async(checkpoint.partition_id) + + self.lease.event_processor_context = event_processor_context + if not await self.host.storage_manager.update_checkpoint_async(self.lease, checkpoint): + _logger.error("Failed to persist checkpoint for partition: %r", self.partition_id) + raise Exception("failed to persist checkpoint") + self.lease.offset = checkpoint.offset + self.lease.sequence_number = checkpoint.sequence_number + else: + _logger.error( # pylint: disable=logging-not-lazy + "Ignoring out of date checkpoint with offset %r/sequence number %r because " + + "current persisted checkpoint has higher offset %r/sequence number %r", + checkpoint.offset, + checkpoint.sequence_number, + in_store_checkpoint.offset, + in_store_checkpoint.sequence_number) + raise Exception("offset/sequenceNumber invalid") + + except Exception as err: + _logger.error("PartitionPumpCheckpointError %r %r %r", + self.host.guid, checkpoint.partition_id, err) + raise + finally: + _logger.debug("PartitionPumpCheckpointStop %r %r", + self.host.guid, checkpoint.partition_id) diff --git a/azure-eventhubs/azure/eventprocessorhost/partition_manager.py b/azure-eventhubs/azure/eventprocessorhost/partition_manager.py new file mode 100644 index 000000000000..41aaded73b56 --- /dev/null +++ b/azure-eventhubs/azure/eventprocessorhost/partition_manager.py @@ -0,0 +1,360 @@ +# 
class PartitionManager:
    """
    Manages the partition event pump execution.

    Runs a periodic load-balancing loop that renews leases this host owns,
    acquires expired ones, steals at most one lease per cycle from the
    biggest owner, and starts/stops a pump per owned partition.
    """

    def __init__(self, host):
        self.host = host
        self.partition_pumps = {}    # partition_id -> running pump
        self.partition_ids = None    # cached partition id list, fetched lazily
        self.run_task = None         # the run_async() future; also a "started" flag
        self.cancellation_token = CancellationToken()

    async def get_partition_ids_async(self):
        """
        Returns a list of all the event hub partition IDs.

        Fetched once from the service via a short-lived client and cached
        for the lifetime of this manager.

        :rtype: list[str]
        """
        if not self.partition_ids:
            try:
                eh_client = EventHubClientAsync(
                    self.host.eh_config.client_address,
                    debug=self.host.eph_options.debug_trace,
                    http_proxy=self.host.eph_options.http_proxy)
                try:
                    eh_info = await eh_client.get_eventhub_info_async()
                    self.partition_ids = eh_info['partition_ids']
                except Exception as err:  # pylint: disable=broad-except
                    raise Exception("Failed to get partition ids", repr(err))
            finally:
                # Always release the temporary client's connection.
                await eh_client.stop_async()
        return self.partition_ids

    async def start_async(self):
        """
        Initializes the partition checkpoint and lease store and then calls run async.

        :raises: Exception if the manager was already started.
        """
        if self.run_task:
            raise Exception("A PartitionManager cannot be started multiple times.")

        partition_count = await self.initialize_stores_async()
        _logger.info("%r PartitionCount: %r", self.host.guid, partition_count)
        self.run_task = asyncio.ensure_future(self.run_async())

    async def stop_async(self):
        """
        Terminates the partition manager: signals cancellation and waits for
        the run loop (which shuts down all pumps on exit) to finish.
        """
        self.cancellation_token.cancel()
        if self.run_task and not self.run_task.done():
            await self.run_task

    async def run_async(self):
        """
        Starts the run loop and manages exceptions and cleanup: whatever
        happens to the loop, all pumps are shut down afterwards.
        """
        try:
            await self.run_loop_async()
        except Exception as err:  # pylint: disable=broad-except
            _logger.error("Run loop failed %r", err)

        try:
            _logger.info("Shutting down all pumps %r", self.host.guid)
            await self.remove_all_pumps_async("Shutdown")
        except Exception as err:  # pylint: disable=broad-except
            raise Exception("Failed to remove all pumps {!r}".format(err))

    async def initialize_stores_async(self):
        """
        Initializes the partition checkpoint and lease store and ensures that a
        checkpoint exists for all partitions. Note in this case checkpoint and
        lease stores are the same storage manager construct.

        :return: Returns the number of partitions.
        :rtype: int
        """
        await self.host.storage_manager.create_checkpoint_store_if_not_exists_async()
        partition_ids = await self.get_partition_ids_async()
        retry_tasks = []
        for partition_id in partition_ids:
            retry_tasks.append(
                self.retry_async(
                    self.host.storage_manager.create_checkpoint_if_not_exists_async,
                    partition_id=partition_id,
                    retry_message="Failure creating checkpoint for partition, retrying",
                    final_failure_message="Out of retries creating checkpoint blob for partition",
                    max_retries=5,
                    host_id=self.host.host_name))

        await asyncio.gather(*retry_tasks)
        return len(partition_ids)

    def retry(self, func, partition_id, retry_message, final_failure_message, max_retries, host_id):
        """
        Make retry_async call sync (for use from non-async callers).

        NOTE(review): the event loop created here is never closed — each call
        leaks a loop; confirm and consider asyncio.new_event_loop()/loop.close().
        """
        loop = asyncio.new_event_loop()
        loop.run_until_complete(self.retry_async(func, partition_id, retry_message,
                                                 final_failure_message, max_retries, host_id))

    async def retry_async(self, func, partition_id, retry_message,
                          final_failure_message, max_retries, host_id):
        """
        Calls func(partition_id) until it succeeds or max_retries is exhausted.
        Throws if it runs out of retries. If it returns, the action succeeded.
        """
        created_okay = False
        retry_count = 0
        while not created_okay and retry_count <= max_retries:
            try:
                await func(partition_id)
                created_okay = True
            except Exception as err:  # pylint: disable=broad-except
                _logger.error("%r %r %r %r", retry_message, host_id, partition_id, err)
                retry_count += 1
        if not created_okay:
            raise Exception(host_id, final_failure_message)

    async def run_loop_async(self):
        """
        This is the main execution loop for allocating and managing pumps.
        """
        while not self.cancellation_token.is_cancelled:
            lease_manager = self.host.storage_manager
            # Inspect all leases.
            # Acquire any expired leases.
            # Renew any leases that currently belong to us.
            getting_all_leases = await lease_manager.get_all_leases()
            leases_owned_by_others_q = Queue()
            renew_tasks = [
                self.attempt_renew_lease_async(
                    get_lease_task,
                    owned_by_others_q=leases_owned_by_others_q,
                    lease_manager=lease_manager)
                for get_lease_task in getting_all_leases]
            await asyncio.gather(*renew_tasks)

            # Drain the queue: each entry is (owned_by_other: bool, lease).
            all_leases = {}
            leases_owned_by_others = []
            our_lease_count = 0
            while not leases_owned_by_others_q.empty():
                lease_owned_by_other = leases_owned_by_others_q.get()
                # Check if lease is owned by other and append
                if lease_owned_by_other[0]:
                    leases_owned_by_others.append(lease_owned_by_other[1])
                else:
                    our_lease_count += 1
                all_leases[lease_owned_by_other[1].partition_id] = lease_owned_by_other[1]

            # Grab more leases if available and needed for load balancing
            # (at most one steal per cycle — see which_lease_to_steal).
            leases_owned_by_others_count = len(leases_owned_by_others)
            if leases_owned_by_others_count > 0:
                steal_this_lease = self.which_lease_to_steal(
                    leases_owned_by_others, our_lease_count)
                if steal_this_lease:
                    try:
                        _logger.info("Lease to steal %r", steal_this_lease.serializable())
                        if await lease_manager.acquire_lease_async(steal_this_lease):
                            _logger.info("Stole lease sucessfully %r %r",
                                         self.host.guid, steal_this_lease.partition_id)
                        else:
                            _logger.info("Failed to steal lease for partition %r %r",
                                         self.host.guid, steal_this_lease.partition_id)
                    except Exception as err:  # pylint: disable=broad-except
                        _logger.error("Failed to steal lease %r", err)

            # Reconcile pumps with current ownership.
            for partition_id in all_leases:
                try:
                    updated_lease = all_leases[partition_id]
                    if updated_lease.owner == self.host.host_name:
                        _logger.debug("Attempting to renew lease %r %r",
                                      self.host.guid, partition_id)
                        await self.check_and_add_pump_async(partition_id, updated_lease)
                    else:
                        _logger.debug("Removing pump due to lost lease.")
                        await self.remove_pump_async(partition_id, "LeaseLost")
                except Exception as err:  # pylint: disable=broad-except
                    _logger.error("Failed to update lease %r", err)
            await asyncio.sleep(lease_manager.lease_renew_interval)

    async def check_and_add_pump_async(self, partition_id, lease):
        """
        Updates the lease on an existing pump, or starts a new pump if none
        (healthy) exists for the partition.

        :param partition_id: The partition ID.
        :type partition_id: str
        :param lease: The lease to be used.
        :type lease: ~azure.eventprocessorhost.lease.Lease
        """
        if partition_id in self.partition_pumps:
            # There already is a pump. Make sure the pump is working and replace the lease.
            captured_pump = self.partition_pumps[partition_id]
            if captured_pump.pump_status == "Errored" or captured_pump.is_closing():
                # The existing pump is bad. Remove it.
                await self.remove_pump_async(partition_id, "Shutdown")
            else:
                # Pump is working, should just replace the lease.
                # This is causing a race condition since if the checkpoint is being updated
                # when the lease changes then the pump will error and shut down
                captured_pump.set_lease(lease)
        else:
            _logger.info("Starting pump %r %r", self.host.guid, partition_id)
            await self.create_new_pump_async(partition_id, lease)

    async def create_new_pump_async(self, partition_id, lease):
        """
        Create a new pump thread with a given lease.

        :param partition_id: The partition ID.
        :type partition_id: str
        :param lease: The lease to be used.
        :type lease: ~azure.eventprocessorhost.lease.Lease
        """
        loop = asyncio.get_event_loop()
        partition_pump = EventHubPartitionPump(self.host, lease)
        # Do the put after start, if the start fails then put doesn't happen
        loop.create_task(partition_pump.open_async())
        self.partition_pumps[partition_id] = partition_pump
        _logger.info("Created new partition pump %r %r", self.host.guid, partition_id)

    async def remove_pump_async(self, partition_id, reason):
        """
        Stops a single partition pump.

        :param partition_id: The partition ID.
        :type partition_id: str
        :param reason: A reason for closing.
        :type reason: str
        """
        if partition_id in self.partition_pumps:
            captured_pump = self.partition_pumps[partition_id]
            if not captured_pump.is_closing():
                await captured_pump.close_async(reason)
            # else, pump is already closing/closed, don't need to try to shut it down again
            del self.partition_pumps[partition_id]  # remove pump
            _logger.debug("Removed pump %r %r", self.host.guid, partition_id)
            _logger.debug("%r pumps still running", len(self.partition_pumps))
        else:
            # PartitionManager main loop tries to remove pump for every partition that the
            # host does not own, just to be sure. Not finding a pump for a partition is normal
            # and expected most of the time.
            _logger.debug("No pump found to remove for this partition %r %r",
                          self.host.guid, partition_id)

    async def remove_all_pumps_async(self, reason):
        """
        Stops all partition pumps
        (Note this might be wrong and need to await all tasks before returning done).

        :param reason: A reason for closing.
        :type reason: str
        :rtype: bool
        """
        pump_tasks = [self.remove_pump_async(p_id, reason) for p_id in self.partition_pumps]
        await asyncio.gather(*pump_tasks)
        return True

    def which_lease_to_steal(self, stealable_leases, have_lease_count):
        """
        Determines and returns which lease to steal.

        If the number of leases is a multiple of the number of hosts, then the desired
        configuration is that all hosts own the same number of leases, and the
        difference between the "biggest" owner and any other is 0.

        If the number of leases is not a multiple of the number of hosts, then the most
        even configuration possible is for some hosts to have (leases/hosts) leases
        and others to have ((leases/hosts) + 1). For example, for 16 partitions
        distributed over five hosts, the distribution would be 4, 3, 3, 3, 3, or any of the
        possible reorderings.

        In either case, if the difference between this host and the biggest owner is 2 or more,
        then the system is not in the most evenly-distributed configuration, so steal one lease
        from the biggest. If there is a tie for biggest, we pick whichever appears first in the
        list because it doesn't really matter which "biggest" is trimmed down.

        Stealing one at a time prevents flapping because it reduces the difference between the
        biggest and this host by two at a time. If the starting difference is two or greater,
        then the difference cannot end up below 0. This host may become tied for biggest, but it
        cannot become larger than the host that it is stealing from.

        :param stealable_leases: List of leases to determine which can be stolen.
        :type stealable_leases: list[~azure.eventprocessorhost.lease.Lease]
        :param have_lease_count: Lease count.
        :type have_lease_count: int
        :rtype: ~azure.eventprocessorhost.lease.Lease
        """
        counts_by_owner = self.count_leases_by_owner(stealable_leases)
        # Ascending sort; .pop() yields the owner with the most leases.
        biggest_owner = (sorted(counts_by_owner.items(), key=lambda kv: kv[1])).pop()
        steal_this_lease = None
        if (biggest_owner[1] - have_lease_count) >= 2:
            steal_this_lease = [l for l in stealable_leases if l.owner == biggest_owner[0]][0]

        return steal_this_lease

    def count_leases_by_owner(self, leases):  # pylint: disable=no-self-use
        """
        Returns a dictionary of lease counts keyed by current owner.
        """
        owners = [l.owner for l in leases]
        return dict(Counter(owners))

    def attempt_renew_lease(self, lease_task, owned_by_others_q, lease_manager):
        """
        Make attempt_renew_lease_async call sync.

        NOTE(review): like retry(), the loop created here is never closed.
        """
        loop = asyncio.new_event_loop()
        loop.run_until_complete(self.attempt_renew_lease_async(lease_task, owned_by_others_q, lease_manager))

    async def attempt_renew_lease_async(self, lease_task, owned_by_others_q, lease_manager):
        """
        Attempts to acquire an expired lease, or renew a lease this host owns.
        Puts (owned_by_other: bool, lease) on the queue: False means this host
        holds the lease after the attempt, True means someone else does.
        Failures to even fetch the lease are logged and skipped entirely.
        """
        try:
            possible_lease = await lease_task
            if await possible_lease.is_expired():
                _logger.info("Trying to aquire lease %r %r",
                             self.host.guid, possible_lease.partition_id)
                if await lease_manager.acquire_lease_async(possible_lease):
                    owned_by_others_q.put((False, possible_lease))
                else:
                    owned_by_others_q.put((True, possible_lease))

            elif possible_lease.owner == self.host.host_name:
                try:
                    _logger.debug("Trying to renew lease %r %r",
                                  self.host.guid, possible_lease.partition_id)
                    if await lease_manager.renew_lease_async(possible_lease):
                        owned_by_others_q.put((False, possible_lease))
                    else:
                        owned_by_others_q.put((True, possible_lease))
                except Exception as err:  # pylint: disable=broad-except
                    # Update to 'Lease Lost' exception.
                    _logger.error("Lease lost exception %r %r %r",
                                  err, self.host.guid, possible_lease.partition_id)
                    owned_by_others_q.put((True, possible_lease))
            else:
                owned_by_others_q.put((True, possible_lease))

        except Exception as err:  # pylint: disable=broad-except
            _logger.error(
                "Failure during getting/acquiring/renewing lease, skipping %r", err)
+ """ + + def __init__(self, host, lease): + self.host = host + self.lease = lease + self.pump_status = "Uninitialized" + self.partition_context = None + self.processor = None + self.loop = None + + def run(self): + """ + Makes pump sync so that it can be run in a thread. + """ + self.loop = asyncio.new_event_loop() + self.loop.run_until_complete(self.open_async()) + + def set_pump_status(self, status): + """ + Updates pump status and logs update to console. + """ + self.pump_status = status + _logger.info("%r partition %r", status, self.lease.partition_id) + + def set_lease(self, new_lease): + """ + Sets a new partition lease to be processed by the pump. + + :param lease: The lease to set. + :type lease: ~azure.eventprocessorhost.lease.Lease + """ + if self.partition_context: + self.partition_context.lease = new_lease + self.partition_context.event_processor_context = new_lease.event_processor_context + + async def open_async(self): + """ + Opens partition pump. + """ + self.set_pump_status("Opening") + self.partition_context = PartitionContext(self.host, self.lease.partition_id, + self.host.eh_config.client_address, + self.host.eh_config.consumer_group, + self.loop) + self.partition_context.lease = self.lease + self.partition_context.event_processor_context = self.lease.event_processor_context + self.processor = self.host.event_processor(self.host.event_processor_params) + try: + await self.processor.open_async(self.partition_context) + except Exception as err: # pylint: disable=broad-except + # If the processor won't create or open, only thing we can do here is pass the buck. + # Null it out so we don't try to operate on it further. + await self.process_error_async(err) + self.processor = None + self.set_pump_status("OpenFailed") + + # If Open Async Didn't Fail call OnOpenAsync + if self.pump_status == "Opening": + await self.on_open_async() + + @abstractmethod + async def on_open_async(self): + """ + Event handler for on open event. 
+ """ + + def is_closing(self): + """ + Returns whether pump is closing. + + :rtype: bool + """ + return self.pump_status == "Closing" or self.pump_status == "Closed" + + async def close_async(self, reason): + """ + Safely closes the pump. + + :param reason: The reason for the shutdown. + :type reason: str + """ + self.set_pump_status("Closing") + try: + await self.on_closing_async(reason) + if self.processor: + _logger.info("PartitionPumpInvokeProcessorCloseStart %r %r %r", + self.host.guid, self.partition_context.partition_id, reason) + await self.processor.close_async(self.partition_context, reason) + _logger.info("PartitionPumpInvokeProcessorCloseStart %r %r", + self.host.guid, self.partition_context.partition_id) + except Exception as err: # pylint: disable=broad-except + await self.process_error_async(err) + _logger.error("%r %r %r", self.host.guid, self.partition_context.partition_id, err) + raise err + + if reason == "LeaseLost": + try: + _logger.info("Lease Lost releasing ownership") + await self.host.storage_manager.release_lease_async(self.partition_context.lease) + except Exception as err: # pylint: disable=broad-except + _logger.error("%r %r %r", self.host.guid, self.partition_context.partition_id, err) + raise err + + self.set_pump_status("Closed") + + @abstractmethod + async def on_closing_async(self, reason): + """ + Event handler for on closing event. + + :param reason: The reason for the shutdown. + :type reason: str + """ + + async def process_events_async(self, events): + """ + Process pump events. + + :param events: List of events to be processed. + :type events: list[~azure.eventhub.common.EventData] + """ + if events: + # Synchronize to serialize calls to the processor. The handler is not installed until + # after OpenAsync returns, so ProcessEventsAsync cannot conflict with OpenAsync. There + # could be a conflict between ProcessEventsAsync and CloseAsync, however. All calls to + # CloseAsync are protected by synchronizing too. 
+ try: + last = events[-1] + if last is not None: + self.partition_context.set_offset_and_sequence_number(last) + await self.processor.process_events_async(self.partition_context, events) + except Exception as err: # pylint: disable=broad-except + await self.process_error_async(err) + + async def process_error_async(self, error): + """ + Passes error to the event processor for processing. + + :param error: An error the occurred. + :type error: Exception + """ + await self.processor.process_error_async(self.partition_context, error) diff --git a/azure-eventhubs/conftest.py b/azure-eventhubs/conftest.py new file mode 100644 index 000000000000..237a60918f17 --- /dev/null +++ b/azure-eventhubs/conftest.py @@ -0,0 +1,260 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +#-------------------------------------------------------------------------- + +import os +import pytest +import logging +import sys +import uuid +from logging.handlers import RotatingFileHandler + +# Ignore async tests for Python < 3.5 +collect_ignore = [] +if sys.version_info < (3, 5): + collect_ignore.append("tests/asynctests") + collect_ignore.append("features") + collect_ignore.append("examples/async_examples") +else: + sys.path.append(os.path.join(os.path.dirname(__file__), "tests")) + from asynctests import MockEventProcessor + from azure.eventprocessorhost import EventProcessorHost + from azure.eventprocessorhost import EventHubPartitionPump + from azure.eventprocessorhost import AzureStorageCheckpointLeaseManager + from azure.eventprocessorhost import AzureBlobLease + from azure.eventprocessorhost import EventHubConfig + from azure.eventprocessorhost.lease import Lease + from azure.eventprocessorhost.partition_pump import PartitionPump + from azure.eventprocessorhost.partition_manager import PartitionManager + 
from azure import eventhub
from azure.eventhub import EventHubClient, Receiver, Offset


def get_logger(filename, level=logging.INFO):
    """
    Configure console logging (stdout) for the "azure.eventhub" and "uamqp"
    loggers, plus an optional rotating file handler, and return the azure
    logger.

    :param filename: Log file path, or None for console-only logging.
    :param level: Level for the "azure.eventhub" logger.
    """
    azure_logger = logging.getLogger("azure.eventhub")
    azure_logger.setLevel(level)
    uamqp_logger = logging.getLogger("uamqp")
    uamqp_logger.setLevel(logging.INFO)

    formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
    console_handler = logging.StreamHandler(stream=sys.stdout)
    console_handler.setFormatter(formatter)
    # Guard against duplicate handlers when fixtures re-import this module.
    if not azure_logger.handlers:
        azure_logger.addHandler(console_handler)
    if not uamqp_logger.handlers:
        uamqp_logger.addHandler(console_handler)

    if filename:
        file_handler = RotatingFileHandler(filename, maxBytes=5*1024*1024, backupCount=2)
        file_handler.setFormatter(formatter)
        azure_logger.addHandler(file_handler)

    return azure_logger


log = get_logger(None, logging.DEBUG)


def create_eventhub(eventhub_config, client=None):
    """
    Create a throw-away Event Hub (2 partitions) with a random name and
    return that name.

    :raises: ValueError if creation fails.
    """
    from azure.servicebus.control_client import ServiceBusService, EventHub
    hub_name = str(uuid.uuid4())
    hub_value = EventHub(partition_count=2)
    client = client or ServiceBusService(
        service_namespace=eventhub_config['namespace'],
        shared_access_key_name=eventhub_config['key_name'],
        shared_access_key_value=eventhub_config['access_key'])
    if client.create_event_hub(hub_name, hub=hub_value, fail_on_exist=True):
        return hub_name
    raise ValueError("EventHub creation failed.")


def cleanup_eventhub(eventhub_config, hub_name, client=None):
    """
    Delete the Event Hub created by create_eventhub.

    Fix: the parameter was named ``servicebus_config`` while the body
    referenced ``eventhub_config``, raising NameError whenever no client was
    supplied; renamed to match create_eventhub (the only caller passes it
    positionally).
    """
    from azure.servicebus.control_client import ServiceBusService
    client = client or ServiceBusService(
        service_namespace=eventhub_config['namespace'],
        shared_access_key_name=eventhub_config['key_name'],
        shared_access_key_value=eventhub_config['access_key'])
    client.delete_event_hub(hub_name)


@pytest.fixture()
def live_eventhub_config():
    """Live Event Hub settings from the environment, or skip the test."""
    try:
        config = {}
        config['hostname'] = os.environ['EVENT_HUB_HOSTNAME']
        config['event_hub'] = os.environ['EVENT_HUB_NAME']
        config['key_name'] = os.environ['EVENT_HUB_SAS_POLICY']
        config['access_key'] = os.environ['EVENT_HUB_SAS_KEY']
        config['namespace'] = os.environ['EVENT_HUB_NAMESPACE']
        config['consumer_group'] = "$Default"
        config['partition'] = "0"
    except KeyError:
        pytest.skip("Live EventHub configuration not found.")
    else:
        return config


@pytest.fixture()
def live_eventhub(live_eventhub_config):  # pylint: disable=redefined-outer-name
    """Yield the live config pointed at a freshly created (then deleted) hub."""
    from azure.servicebus.control_client import ServiceBusService
    client = ServiceBusService(
        service_namespace=live_eventhub_config['namespace'],
        shared_access_key_name=live_eventhub_config['key_name'],
        shared_access_key_value=live_eventhub_config['access_key'])
    # Fix: hub_name was only assigned inside the try, so a creation failure
    # raised UnboundLocalError in the finally block, masking the real error.
    hub_name = None
    try:
        hub_name = create_eventhub(live_eventhub_config, client=client)
        print("Created EventHub {}".format(hub_name))
        live_eventhub_config['event_hub'] = hub_name
        yield live_eventhub_config
    finally:
        if hub_name:
            cleanup_eventhub(live_eventhub_config, hub_name, client=client)
            print("Deleted EventHub {}".format(hub_name))


@pytest.fixture()
def connection_str(live_eventhub):
    """Entity-scoped connection string for the fixture-created hub."""
    return "Endpoint=sb://{}/;SharedAccessKeyName={};SharedAccessKey={};EntityPath={}".format(
        live_eventhub['hostname'],
        live_eventhub['key_name'],
        live_eventhub['access_key'],
        live_eventhub['event_hub'])


@pytest.fixture()
def invalid_hostname(live_eventhub_config):
    """Connection string with a hostname that does not exist."""
    return "Endpoint=sb://invalid123.servicebus.windows.net/;SharedAccessKeyName={};SharedAccessKey={};EntityPath={}".format(
        live_eventhub_config['key_name'],
        live_eventhub_config['access_key'],
        live_eventhub_config['event_hub'])


@pytest.fixture()
def invalid_key(live_eventhub_config):
    """Connection string with a bad SAS key."""
    return "Endpoint=sb://{}/;SharedAccessKeyName={};SharedAccessKey=invalid;EntityPath={}".format(
        live_eventhub_config['hostname'],
        live_eventhub_config['key_name'],
        live_eventhub_config['event_hub'])


@pytest.fixture()
def invalid_policy(live_eventhub_config):
    """Connection string with a bad SAS policy name."""
    return "Endpoint=sb://{}/;SharedAccessKeyName=invalid;SharedAccessKey={};EntityPath={}".format(
        live_eventhub_config['hostname'],
        live_eventhub_config['access_key'],
        live_eventhub_config['event_hub'])


@pytest.fixture()
def iot_connection_str():
    """IoT Hub connection string from the environment, or skip."""
    try:
        return os.environ['IOTHUB_CONNECTION_STR']
    except KeyError:
        pytest.skip("No IotHub connection string found.")


@pytest.fixture()
def device_id():
    """IoT Hub device ID from the environment, or skip."""
    try:
        return os.environ['IOTHUB_DEVICE']
    except KeyError:
        pytest.skip("No Iothub device ID found.")


@pytest.fixture()
def connstr_receivers(connection_str):
    """Yield (connection_str, receivers) with one receiver per partition."""
    client = EventHubClient.from_connection_string(connection_str, debug=False)
    eh_hub_info = client.get_eventhub_info()
    partitions = eh_hub_info["partition_ids"]

    # (Removed an unused local that duplicated the Offset below.)
    receivers = []
    for p in partitions:
        receivers.append(client.add_receiver("$default", p, prefetch=500, offset=Offset("@latest")))

    client.run()

    # Prime each receiver so "@latest" is anchored before the test sends.
    for r in receivers:
        r.receive(timeout=1)
    yield connection_str, receivers

    client.stop()


@pytest.fixture()
def connstr_senders(connection_str):
    """Yield (connection_str, senders) with one sender per partition."""
    client = EventHubClient.from_connection_string(connection_str, debug=True)
    eh_hub_info = client.get_eventhub_info()
    partitions = eh_hub_info["partition_ids"]

    senders = []
    for p in partitions:
        senders.append(client.add_sender(partition=p))

    client.run()
    yield connection_str, senders
    client.stop()


@pytest.fixture()
def storage_clm(eph):
    """Checkpoint/lease manager bound to a throw-away storage container."""
    try:
        container = str(uuid.uuid4())
        storage_clm = AzureStorageCheckpointLeaseManager(
            os.environ['AZURE_STORAGE_ACCOUNT'],
            os.environ['AZURE_STORAGE_ACCESS_KEY'],
            container)
    except KeyError:
        pytest.skip("Live Storage configuration not found.")
    try:
        storage_clm.initialize(eph)
        storage_clm.storage_client.create_container(container)
        yield storage_clm
    finally:
        storage_clm.storage_client.delete_container(container)


@pytest.fixture()
def eph():
    """EventProcessorHost wired to the live config, or skip if absent."""
    try:
        storage_clm = AzureStorageCheckpointLeaseManager(
            os.environ['AZURE_STORAGE_ACCOUNT'],
            os.environ['AZURE_STORAGE_ACCESS_KEY'],
            "lease")
        NAMESPACE = os.environ.get('EVENT_HUB_NAMESPACE')
        EVENTHUB = os.environ.get('EVENT_HUB_NAME')
        USER = os.environ.get('EVENT_HUB_SAS_POLICY')
        KEY = os.environ.get('EVENT_HUB_SAS_KEY')

        eh_config = EventHubConfig(NAMESPACE, EVENTHUB, USER, KEY, consumer_group="$default")
        host = EventProcessorHost(
            MockEventProcessor,
            eh_config,
            storage_clm)
    except KeyError:
        pytest.skip("Live EventHub configuration not found.")
    return host


@pytest.fixture()
def eh_partition_pump(eph):
    """EventHubPartitionPump for partition "1" with a blob lease."""
    lease = AzureBlobLease()
    lease.with_partition_id("1")
    partition_pump = EventHubPartitionPump(eph, lease)
    return partition_pump


@pytest.fixture()
def partition_pump(eph):
    """Base PartitionPump for partition "1" with a plain lease."""
    lease = Lease()
    lease.with_partition_id("1")
    partition_pump = PartitionPump(eph, lease)
    return partition_pump


@pytest.fixture()
def partition_manager(eph):
    """PartitionManager bound to the eph fixture."""
    partition_manager = PartitionManager(eph)
    return partition_manager
+# -------------------------------------------------------------------------------------------- + +import sys +import logging + +def get_logger(level): + azure_logger = logging.getLogger("azure.eventhub") + azure_logger.setLevel(level) + handler = logging.StreamHandler(stream=sys.stdout) + handler.setFormatter(logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')) + if not azure_logger.handlers: + azure_logger.addHandler(handler) + + uamqp_logger = logging.getLogger("uamqp") + uamqp_logger.setLevel(logging.INFO) + if not uamqp_logger.handlers: + uamqp_logger.addHandler(handler) + return azure_logger diff --git a/azure-eventhubs/examples/async_examples/test_examples_eventhub_async.py b/azure-eventhubs/examples/async_examples/test_examples_eventhub_async.py new file mode 100644 index 000000000000..7f18fc97b756 --- /dev/null +++ b/azure-eventhubs/examples/async_examples/test_examples_eventhub_async.py @@ -0,0 +1,179 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+#-------------------------------------------------------------------------- + +import pytest +import datetime +import os +import time +import logging +import asyncio + +from azure.eventhub import EventHubError + + +@pytest.mark.asyncio +async def test_example_eventhub_async_send_and_receive(live_eventhub_config): + # [START create_eventhub_client_async] + from azure.eventhub import EventHubClientAsync + import os + connection_str = "Endpoint=sb://{}/;SharedAccessKeyName={};SharedAccessKey={};EntityPath={}".format( + os.environ['EVENT_HUB_HOSTNAME'], + os.environ['EVENT_HUB_SAS_POLICY'], + os.environ['EVENT_HUB_SAS_KEY'], + os.environ['EVENT_HUB_NAME']) + client = EventHubClientAsync.from_connection_string(connection_str) + # [END create_eventhub_client_async] + + from azure.eventhub import EventData, Offset + + # [START create_eventhub_client_async_sender] + client = EventHubClientAsync.from_connection_string(connection_str) + # Add a async sender to the async client object. + sender = client.add_async_sender(partition="0") + # [END create_eventhub_client_async_sender] + + # [START create_eventhub_client_async_receiver] + client = EventHubClientAsync.from_connection_string(connection_str) + # Add a async receiver to the async client object. + receiver = client.add_async_receiver(consumer_group="$default", partition="0", offset=Offset('@latest')) + # [END create_eventhub_client_async_receiver] + + # [START create_eventhub_client_async_epoch_receiver] + client = EventHubClientAsync.from_connection_string(connection_str) + # Add a async receiver to the async client object. + epoch_receiver = client.add_async_epoch_receiver(consumer_group="$default", partition="0", epoch=42) + # [END create_eventhub_client_async_epoch_receiver] + + # [START eventhub_client_run_async] + client = EventHubClientAsync.from_connection_string(connection_str) + # Add AsyncSenders/AsyncReceivers + try: + # Opens the connection and starts running all AsyncSender/AsyncReceiver clients. 
+ await client.run_async() + # Start sending and receiving + except: + raise + finally: + await client.stop_async() + # [END eventhub_client_run_async] + + + client = EventHubClientAsync.from_connection_string(connection_str) + sender = client.add_async_sender(partition="0") + receiver = client.add_async_receiver(consumer_group="$default", partition="0", offset=Offset('@latest')) + try: + # Opens the connection and starts running all AsyncSender/AsyncReceiver clients. + await client.run_async() + + # [START eventhub_client_async_send] + event_data = EventData(b"A single event") + await sender.send(event_data) + # [END eventhub_client_async_send] + time.sleep(1) + # [START eventhub_client_async_receive] + logger = logging.getLogger("azure.eventhub") + received = await receiver.receive(timeout=5) + for event_data in received: + logger.info("Message received:{}".format(event_data.body_as_str())) + # [END eventhub_client_async_receive] + assert len(received) == 1 + assert received[0].body_as_str() == "A single event" + assert list(received[-1].body)[0] == b"A single event" + except: + raise + finally: + await client.stop_async() + + # [START eventhub_client_async_stop] + client = EventHubClientAsync.from_connection_string(connection_str) + # Add AsyncSenders/AsyncReceivers + try: + # Opens the connection and starts running all AsyncSender/AsyncReceiver clients. 
+ await client.run_async() + # Start sending and receiving + except: + raise + finally: + await client.stop_async() + # [END eventhub_client_async_stop] + + +@pytest.mark.asyncio +async def test_example_eventhub_async_sender_ops(live_eventhub_config, connection_str): + import os + # [START create_eventhub_client_async_sender_instance] + from azure.eventhub import EventHubClientAsync + + client = EventHubClientAsync.from_connection_string(connection_str) + sender = client.add_async_sender(partition="0") + # [END create_eventhub_client_async_sender_instance] + + # [START eventhub_client_async_sender_open] + client = EventHubClientAsync.from_connection_string(connection_str) + sender = client.add_async_sender(partition="0") + try: + # Open the Async Sender using the supplied conneciton. + await sender.open_async() + # Start sending + except: + raise + finally: + # Close down the send handler. + await sender.close_async() + # [END eventhub_client_async_sender_open] + + # [START eventhub_client_async_sender_close] + client = EventHubClientAsync.from_connection_string(connection_str) + sender = client.add_async_sender(partition="0") + try: + # Open the Async Sender using the supplied conneciton. + await sender.open_async() + # Start sending + except: + raise + finally: + # Close down the send handler. 
+ await sender.close_async() + # [END eventhub_client_async_sender_close] + + +@pytest.mark.asyncio +async def test_example_eventhub_async_receiver_ops(live_eventhub_config, connection_str): + import os + # [START create_eventhub_client_async_receiver_instance] + from azure.eventhub import EventHubClientAsync, Offset + + client = EventHubClientAsync.from_connection_string(connection_str) + receiver = client.add_async_receiver(consumer_group="$default", partition="0", offset=Offset('@latest')) + # [END create_eventhub_client_async_receiver_instance] + + # [START eventhub_client_async_receiver_open] + client = EventHubClientAsync.from_connection_string(connection_str) + receiver = client.add_async_receiver(consumer_group="$default", partition="0", offset=Offset('@latest')) + try: + # Open the Async Receiver using the supplied conneciton. + await receiver.open_async() + # Start receiving + except: + raise + finally: + # Close down the receive handler. + await receiver.close_async() + # [END eventhub_client_async_receiver_open] + + # [START eventhub_client_async_receiver_close] + client = EventHubClientAsync.from_connection_string(connection_str) + receiver = client.add_async_receiver(consumer_group="$default", partition="0", offset=Offset('@latest')) + try: + # Open the Async Receiver using the supplied conneciton. + await receiver.open_async() + # Start receiving + except: + raise + finally: + # Close down the receive handler. + await receiver.close_async() + # [END eventhub_client_async_receiver_close] \ No newline at end of file diff --git a/azure-eventhubs/examples/batch_send.py b/azure-eventhubs/examples/batch_send.py new file mode 100644 index 000000000000..7cbf6259d661 --- /dev/null +++ b/azure-eventhubs/examples/batch_send.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python + +""" +An example to show batch sending events to an Event Hub. 
+""" + +# pylint: disable=C0111 + +import sys +import logging +import datetime +import time +import os + +from azure.eventhub import EventHubClient, Sender, EventData + +import examples +logger = examples.get_logger(logging.INFO) + +# Address can be in either of these formats: +# "amqps://:@.servicebus.windows.net/myeventhub" +# "amqps://.servicebus.windows.net/myeventhub" +ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') + +# SAS policy and key are not required if they are encoded in the URL +USER = os.environ.get('EVENT_HUB_SAS_POLICY') +KEY = os.environ.get('EVENT_HUB_SAS_KEY') + + +def data_generator(): + for i in range(1500): + logger.info("Yielding message {}".format(i)) + yield b"Hello world" + + +try: + if not ADDRESS: + raise ValueError("No EventHubs URL supplied.") + + client = EventHubClient(ADDRESS, debug=False, username=USER, password=KEY) + sender = client.add_sender(partition="1") + client.run() + try: + start_time = time.time() + data = EventData(batch=data_generator()) + sender.send(data) + except: + raise + finally: + end_time = time.time() + client.stop() + run_time = end_time - start_time + logger.info("Runtime: {} seconds".format(run_time)) + +except KeyboardInterrupt: + pass diff --git a/azure-eventhubs/examples/batch_transfer.py b/azure-eventhubs/examples/batch_transfer.py new file mode 100644 index 000000000000..676ac6c3e2ea --- /dev/null +++ b/azure-eventhubs/examples/batch_transfer.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python + +""" +An example to show batch sending events to an Event Hub. 
+""" + +# pylint: disable=C0111 + +import sys +import logging +import datetime +import time +import os + +from azure.eventhub import EventHubClient, Sender, EventData + +import examples +logger = examples.get_logger(logging.INFO) + +# Address can be in either of these formats: +# "amqps://:@.servicebus.windows.net/myeventhub" +# "amqps://.servicebus.windows.net/myeventhub" +ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') + +# SAS policy and key are not required if they are encoded in the URL +USER = os.environ.get('EVENT_HUB_SAS_POLICY') +KEY = os.environ.get('EVENT_HUB_SAS_KEY') + + +def callback(outcome, condition): + logger.info("Message sent. Outcome: {}, Condition: {}".format( + outcome, condition)) + + +def data_generator(): + for i in range(1500): + logger.info("Yielding message {}".format(i)) + yield b"Hello world" + + +try: + if not ADDRESS: + raise ValueError("No EventHubs URL supplied.") + + client = EventHubClient(ADDRESS, debug=False, username=USER, password=KEY) + sender = client.add_sender() + client.run() + try: + start_time = time.time() + data = EventData(batch=data_generator()) + sender.transfer(data, callback=callback) + sender.wait() + except: + raise + finally: + end_time = time.time() + client.stop() + run_time = end_time - start_time + logger.info("Runtime: {} seconds".format(run_time)) + +except KeyboardInterrupt: + pass diff --git a/azure-eventhubs/examples/eph.py b/azure-eventhubs/examples/eph.py new file mode 100644 index 000000000000..39f0fbba4179 --- /dev/null +++ b/azure-eventhubs/examples/eph.py @@ -0,0 +1,129 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# ----------------------------------------------------------------------------------- + +import logging +import asyncio +import sys +import os +import signal +import functools + +from azure.eventprocessorhost import ( + AbstractEventProcessor, + AzureStorageCheckpointLeaseManager, + EventHubConfig, + EventProcessorHost, + EPHOptions) + +import examples +logger = examples.get_logger(logging.INFO) + + +class EventProcessor(AbstractEventProcessor): + """ + Example Implmentation of AbstractEventProcessor + """ + + def __init__(self, params=None): + """ + Init Event processor + """ + super().__init__(params) + self._msg_counter = 0 + + async def open_async(self, context): + """ + Called by processor host to initialize the event processor. + """ + logger.info("Connection established {}".format(context.partition_id)) + + async def close_async(self, context, reason): + """ + Called by processor host to indicate that the event processor is being stopped. + :param context: Information about the partition + :type context: ~azure.eventprocessorhost.PartitionContext + """ + logger.info("Connection closed (reason {}, id {}, offset {}, sq_number {})".format( + reason, + context.partition_id, + context.offset, + context.sequence_number)) + + async def process_events_async(self, context, messages): + """ + Called by the processor host when a batch of events has arrived. + This is where the real work of the event processor is done. + :param context: Information about the partition + :type context: ~azure.eventprocessorhost.PartitionContext + :param messages: The events to be processed. + :type messages: list[~azure.eventhub.common.EventData] + """ + logger.info("Events processed {}".format(context.sequence_number)) + await context.checkpoint_async() + + async def process_error_async(self, context, error): + """ + Called when the underlying client experiences an error while receiving. 
+        EventProcessorHost will take care of recovering from the error and
+        continuing to pump messages, so no action is required from your code.
+        :param context: Information about the partition
+        :type context: ~azure.eventprocessorhost.PartitionContext
+        :param error: The error that occurred.
+        """
+        logger.error("Event Processor Error {!r}".format(error))
+
+
+async def wait_and_close(host):
+    """
+    Run EventProcessorHost for 60 seconds then shutdown.
+    """
+    await asyncio.sleep(60)
+    await host.close_async()
+
+
+try:
+    loop = asyncio.get_event_loop()
+
+    # Storage Account Credentials
+    STORAGE_ACCOUNT_NAME = os.environ.get('AZURE_STORAGE_ACCOUNT')
+    STORAGE_KEY = os.environ.get('AZURE_STORAGE_ACCESS_KEY')
+    LEASE_CONTAINER_NAME = "leases"
+
+    NAMESPACE = os.environ.get('EVENT_HUB_NAMESPACE')
+    EVENTHUB = os.environ.get('EVENT_HUB_NAME')
+    USER = os.environ.get('EVENT_HUB_SAS_POLICY')
+    KEY = os.environ.get('EVENT_HUB_SAS_KEY')
+
+    # Eventhub config and storage manager
+    eh_config = EventHubConfig(NAMESPACE, EVENTHUB, USER, KEY, consumer_group="$default")
+    eh_options = EPHOptions()
+    eh_options.release_pump_on_timeout = True
+    eh_options.debug_trace = False
+    storage_manager = AzureStorageCheckpointLeaseManager(
+        STORAGE_ACCOUNT_NAME, STORAGE_KEY, LEASE_CONTAINER_NAME)
+
+    # Event loop and host
+    host = EventProcessorHost(
+        EventProcessor,
+        eh_config,
+        storage_manager,
+        ep_params=["param1","param2"],
+        eph_options=eh_options,
+        loop=loop)
+
+    tasks = asyncio.gather(
+        host.open_async(),
+        wait_and_close(host))
+    loop.run_until_complete(tasks)
+
+except KeyboardInterrupt:
+    # Canceling pending tasks and stopping the loop
+    for task in asyncio.Task.all_tasks():
+        task.cancel()
+    loop.run_forever()
+    tasks.exception()
+
+finally:
+    loop.stop()
diff --git a/azure-eventhubs/examples/iothub_recv.py b/azure-eventhubs/examples/iothub_recv.py
new file mode 100644
index 000000000000..dd1f6a566771
--- /dev/null
+++ b/azure-eventhubs/examples/iothub_recv.py
@@ -0,0 +1,27 @@
+#
--------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+"""
+An example to show receiving events from an IoT Hub partition.
+"""
+import os
+from azure.eventhub import EventData, EventHubClient, Offset
+
+import logging
+logger = logging.getLogger('azure.eventhub')
+
+CONNSTR = os.environ['IOTHUB_CONNECTION_STR']
+
+client = EventHubClient.from_iothub_connection_string(CONNSTR, debug=True)
+receiver = client.add_receiver("$default", "0", operation='/messages/events')
+try:
+    client.run()
+    eh_info = client.get_eventhub_info()
+    print(eh_info)
+
+    received = receiver.receive(timeout=5)
+    print(received)
+finally:
+    client.stop()
diff --git a/azure-eventhubs/examples/recv.py b/azure-eventhubs/examples/recv.py
new file mode 100644
index 000000000000..f43d03be9ce5
--- /dev/null
+++ b/azure-eventhubs/examples/recv.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+"""
+An example to show receiving events from an Event Hub partition.
+""" +import os +import sys +import logging +import time +from azure.eventhub import EventHubClient, Receiver, Offset + +import examples +logger = examples.get_logger(logging.INFO) + +# Address can be in either of these formats: +# "amqps://:@.servicebus.windows.net/myeventhub" +# "amqps://.servicebus.windows.net/myeventhub" +ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') + +# SAS policy and key are not required if they are encoded in the URL +USER = os.environ.get('EVENT_HUB_SAS_POLICY') +KEY = os.environ.get('EVENT_HUB_SAS_KEY') +CONSUMER_GROUP = "$default" +OFFSET = Offset("-1") +PARTITION = "0" + + +total = 0 +last_sn = -1 +last_offset = "-1" +client = EventHubClient(ADDRESS, debug=False, username=USER, password=KEY) +try: + receiver = client.add_receiver(CONSUMER_GROUP, PARTITION, prefetch=5000, offset=OFFSET) + client.run() + start_time = time.time() + batch = receiver.receive(timeout=5000) + while batch: + for event_data in batch: + last_offset = event_data.offset + last_sn = event_data.sequence_number + print("Received: {}, {}".format(last_offset.value, last_sn)) + print(event_data.body_as_str()) + total += 1 + batch = receiver.receive(timeout=5000) + + end_time = time.time() + client.stop() + run_time = end_time - start_time + print("Received {} messages in {} seconds".format(total, run_time)) + +except KeyboardInterrupt: + pass +finally: + client.stop() \ No newline at end of file diff --git a/azure-eventhubs/examples/recv_async.py b/azure-eventhubs/examples/recv_async.py new file mode 100644 index 000000000000..04d922649b3c --- /dev/null +++ b/azure-eventhubs/examples/recv_async.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python + +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- + +""" +An example to show running concurrent receivers. +""" + +import os +import sys +import time +import logging +import asyncio +from azure.eventhub import Offset, EventHubClientAsync, AsyncReceiver + +import examples +logger = examples.get_logger(logging.INFO) + +# Address can be in either of these formats: +# "amqps://:@.servicebus.windows.net/myeventhub" +# "amqps://.servicebus.windows.net/myeventhub" +ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') + +# SAS policy and key are not required if they are encoded in the URL +USER = os.environ.get('EVENT_HUB_SAS_POLICY') +KEY = os.environ.get('EVENT_HUB_SAS_KEY') +CONSUMER_GROUP = "$default" +OFFSET = Offset("-1") + + +async def pump(client, partition): + receiver = client.add_async_receiver(CONSUMER_GROUP, partition, OFFSET, prefetch=5) + await client.run_async() + total = 0 + start_time = time.time() + for event_data in await receiver.receive(timeout=10): + last_offset = event_data.offset + last_sn = event_data.sequence_number + print("Received: {}, {}".format(last_offset.value, last_sn)) + total += 1 + end_time = time.time() + run_time = end_time - start_time + print("Received {} messages in {} seconds".format(total, run_time)) + +try: + if not ADDRESS: + raise ValueError("No EventHubs URL supplied.") + + loop = asyncio.get_event_loop() + client = EventHubClientAsync(ADDRESS, debug=False, username=USER, password=KEY) + tasks = [ + asyncio.ensure_future(pump(client, "0")), + asyncio.ensure_future(pump(client, "1"))] + loop.run_until_complete(asyncio.wait(tasks)) + loop.run_until_complete(client.stop_async()) + loop.close() + +except KeyboardInterrupt: + pass diff --git a/azure-eventhubs/examples/recv_batch.py b/azure-eventhubs/examples/recv_batch.py new file mode 100644 index 000000000000..9478f51feb21 --- /dev/null +++ b/azure-eventhubs/examples/recv_batch.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python + +# 
-------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +""" +An example to show receiving events from an Event Hub partition and processing +the event in on_event_data callback. + +""" +import os +import sys +import logging +from azure.eventhub import EventHubClient, Receiver, Offset + +import examples +logger = examples.get_logger(logging.INFO) + +# Address can be in either of these formats: +# "amqps://:@.servicebus.windows.net/myeventhub" +# "amqps://.servicebus.windows.net/myeventhub" +ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') + +# SAS policy and key are not required if they are encoded in the URL +USER = os.environ.get('EVENT_HUB_SAS_POLICY') +KEY = os.environ.get('EVENT_HUB_SAS_KEY') +CONSUMER_GROUP = "$default" +OFFSET = Offset("-1") +PARTITION = "0" + + +total = 0 +last_sn = -1 +last_offset = "-1" +client = EventHubClient(ADDRESS, debug=False, username=USER, password=KEY) +try: + receiver = client.add_receiver(CONSUMER_GROUP, PARTITION, prefetch=100, offset=OFFSET) + client.run() + batched_events = receiver.receive(max_batch_size=10) + for event_data in batched_events: + last_offset = event_data.offset.value + last_sn = event_data.sequence_number + total += 1 + print("Partition {}, Received {}, sn={} offset={}".format( + PARTITION, + total, + last_sn, + last_offset)) + +except KeyboardInterrupt: + pass +finally: + client.stop() \ No newline at end of file diff --git a/azure-eventhubs/examples/recv_epoch.py b/azure-eventhubs/examples/recv_epoch.py new file mode 100644 index 000000000000..f9f291ed6bc3 --- /dev/null +++ b/azure-eventhubs/examples/recv_epoch.py @@ -0,0 +1,59 @@ +#!/usr/bin/env python + +# 
-------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +""" +An example to show receiving events from an Event Hub partition as an epoch receiver. +""" + +import os +import sys +import time +import logging +import asyncio +from azure.eventhub import Offset, EventHubClientAsync, AsyncReceiver + +import examples +logger = examples.get_logger(logging.INFO) + +# Address can be in either of these formats: +# "amqps://:@.servicebus.windows.net/myeventhub" +# "amqps://.servicebus.windows.net/myeventhub" +ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') + +# SAS policy and key are not required if they are encoded in the URL +USER = os.environ.get('EVENT_HUB_SAS_POLICY') +KEY = os.environ.get('EVENT_HUB_SAS_KEY') +CONSUMER_GROUP = "$default" +EPOCH = 42 +PARTITION = "0" + + +async def pump(client, epoch): + receiver = client.add_async_epoch_receiver(CONSUMER_GROUP, PARTITION, epoch=epoch) + await client.run_async() + total = 0 + start_time = time.time() + for event_data in await receiver.receive(timeout=5): + last_offset = event_data.offset + last_sn = event_data.sequence_number + total += 1 + end_time = time.time() + run_time = end_time - start_time + await client.stop_async() + print("Received {} messages in {} seconds".format(total, run_time)) + +try: + if not ADDRESS: + raise ValueError("No EventHubs URL supplied.") + + loop = asyncio.get_event_loop() + client = EventHubClientAsync(ADDRESS, debug=False, username=USER, password=KEY) + loop.run_until_complete(pump(client, 20)) + loop.close() + +except KeyboardInterrupt: + pass diff --git a/azure-eventhubs/examples/send.py b/azure-eventhubs/examples/send.py new file mode 100644 index 000000000000..6881b0d578ee --- /dev/null +++ 
b/azure-eventhubs/examples/send.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python + +""" +An example to show sending events to an Event Hub partition. +""" + +# pylint: disable=C0111 + +import sys +import logging +import datetime +import time +import os + +from azure.eventhub import EventHubClient, Sender, EventData + +import examples +logger = examples.get_logger(logging.INFO) + + +# Address can be in either of these formats: +# "amqps://:@.servicebus.windows.net/myeventhub" +# "amqps://.servicebus.windows.net/myeventhub" +ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') + +# SAS policy and key are not required if they are encoded in the URL +USER = os.environ.get('EVENT_HUB_SAS_POLICY') +KEY = os.environ.get('EVENT_HUB_SAS_KEY') + +try: + if not ADDRESS: + raise ValueError("No EventHubs URL supplied.") + + client = EventHubClient(ADDRESS, debug=False, username=USER, password=KEY) + sender = client.add_sender(partition="0") + client.run() + try: + start_time = time.time() + for i in range(100): + logger.info("Sending message: {}".format(i)) + sender.send(EventData(str(i))) + except: + raise + finally: + end_time = time.time() + client.stop() + run_time = end_time - start_time + logger.info("Runtime: {} seconds".format(run_time)) + +except KeyboardInterrupt: + pass diff --git a/azure-eventhubs/examples/send_async.py b/azure-eventhubs/examples/send_async.py new file mode 100644 index 000000000000..248fdcf853b9 --- /dev/null +++ b/azure-eventhubs/examples/send_async.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python + +""" +An example to show sending events asynchronously to an Event Hub with partition keys. 
+""" + +# pylint: disable=C0111 + +import sys +import logging +import time +import asyncio +import os + +from azure.eventhub import EventData, EventHubClientAsync, AsyncSender + +import examples +logger = examples.get_logger(logging.INFO) + +# Address can be in either of these formats: +# "amqps://:@.servicebus.windows.net/myeventhub" +# "amqps://.servicebus.windows.net/myeventhub" +ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') + +# SAS policy and key are not required if they are encoded in the URL +USER = os.environ.get('EVENT_HUB_SAS_POLICY') +KEY = os.environ.get('EVENT_HUB_SAS_KEY') + + +async def run(client): + sender = client.add_async_sender() + await client.run_async() + await send(sender, 4) + + +async def send(snd, count): + for i in range(count): + logger.info("Sending message: {}".format(i)) + data = EventData(str(i)) + data.partition_key = b'SamplePartitionKey' + await snd.send(data) + +try: + if not ADDRESS: + raise ValueError("No EventHubs URL supplied.") + + loop = asyncio.get_event_loop() + client = EventHubClientAsync(ADDRESS, debug=True, username=USER, password=KEY) + tasks = asyncio.gather( + run(client), + run(client)) + start_time = time.time() + loop.run_until_complete(tasks) + loop.run_until_complete(client.stop_async()) + end_time = time.time() + run_time = end_time - start_time + logger.info("Runtime: {} seconds".format(run_time)) + loop.close() + +except KeyboardInterrupt: + pass diff --git a/azure-eventhubs/examples/test_examples_eventhub.py b/azure-eventhubs/examples/test_examples_eventhub.py new file mode 100644 index 000000000000..ffd53e02fd3e --- /dev/null +++ b/azure-eventhubs/examples/test_examples_eventhub.py @@ -0,0 +1,254 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+#-------------------------------------------------------------------------- + +import pytest +import datetime +import os +import time +import logging + +from azure.eventhub import EventHubError + + +def create_eventhub_client(live_eventhub_config): + # [START create_eventhub_client] + import os + from azure.eventhub import EventHubClient + + address = os.environ['EVENT_HUB_ADDRESS'] + shared_access_policy = os.environ['EVENT_HUB_SAS_POLICY'] + shared_access_key = os.environ['EVENT_HUB_SAS_KEY'] + + client = EventHubClient( + address=address, + username=shared_access_policy, + password=shared_access_key) + # [END create_eventhub_client] + return client + + +def create_eventhub_client_from_sas_token(live_eventhub_config): + # [START create_eventhub_client_sas_token] + import os + from azure.eventhub import EventHubClient + + address = os.environ['EVENT_HUB_ADDRESS'] + sas_token = os.environ['EVENT_HUB_SAS_TOKEN'] + + client = EventHubClient.from_sas_token( + address=address, + sas_token=sas_token) + # [END create_eventhub_client_sas_token] + + +def create_eventhub_client_from_iothub_connection_string(live_eventhub_config): + # [START create_eventhub_client_iot_connstr] + import os + from azure.eventhub import EventHubClient + + iot_connection_str = os.environ['IOTHUB_CONNECTION_STR'] + client = EventHubClient.from_iothub_connection_string(iot_connection_str) + # [END create_eventhub_client_iot_connstr] + + +def test_example_eventhub_sync_send_and_receive(live_eventhub_config): + # [START create_eventhub_client_connstr] + import os + from azure.eventhub import EventHubClient + + connection_str = "Endpoint=sb://{}/;SharedAccessKeyName={};SharedAccessKey={};EntityPath={}".format( + os.environ['EVENT_HUB_HOSTNAME'], + os.environ['EVENT_HUB_SAS_POLICY'], + os.environ['EVENT_HUB_SAS_KEY'], + os.environ['EVENT_HUB_NAME']) + client = EventHubClient.from_connection_string(connection_str) + # [END create_eventhub_client_connstr] + + from azure.eventhub import EventData, 
Offset + + # [START create_eventhub_client_sender] + client = EventHubClient.from_connection_string(connection_str) + # Add a sender to the client object. + sender = client.add_sender(partition="0") + # [END create_eventhub_client_sender] + + # [START create_eventhub_client_receiver] + client = EventHubClient.from_connection_string(connection_str) + # Add a receiver to the client object. + receiver = client.add_receiver(consumer_group="$default", partition="0", offset=Offset('@latest')) + # [END create_eventhub_client_receiver] + + # [START create_eventhub_client_epoch_receiver] + client = EventHubClient.from_connection_string(connection_str) + # Add a receiver to the client object with an epoch value. + epoch_receiver = client.add_epoch_receiver(consumer_group="$default", partition="0", epoch=42) + # [END create_eventhub_client_epoch_receiver] + + # [START eventhub_client_run] + client = EventHubClient.from_connection_string(connection_str) + # Add Senders/Receivers + try: + client.run() + # Start sending and receiving + except: + raise + finally: + client.stop() + # [END eventhub_client_run] + + client = EventHubClient.from_connection_string(connection_str) + sender = client.add_sender(partition="0") + receiver = client.add_receiver(consumer_group="$default", partition="0", offset=Offset('@latest')) + try: + # Opens the connection and starts running all Sender/Receiver clients. 
+ client.run() + # Start sending and receiving + + # [START create_event_data] + event_data = EventData("String data") + event_data = EventData(b"Bytes data") + event_data = EventData([b"A", b"B", b"C"]) + + def batched(): + for i in range(10): + yield "Batch data, Event number {}".format(i) + + event_data = EventData(batch=batched()) + # [END create_event_data] + + # [START eventhub_client_sync_send] + event_data = EventData(b"A single event") + sender.send(event_data) + # [END eventhub_client_sync_send] + time.sleep(1) + + # [START eventhub_client_sync_receive] + logger = logging.getLogger("azure.eventhub") + received = receiver.receive(timeout=5, max_batch_size=1) + for event_data in received: + logger.info("Message received:{}".format(event_data.body_as_str())) + # [END eventhub_client_sync_receive] + assert len(received) == 1 + assert received[0].body_as_str() == "A single event" + assert list(received[-1].body)[0] == b"A single event" + except: + raise + + finally: + client.stop() + + # [START eventhub_client_stop] + client = EventHubClient.from_connection_string(connection_str) + # Add Senders/Receivers + try: + client.run() + # Start sending and receiving + except: + raise + finally: + client.stop() + # [END eventhub_client_stop] + + +def test_example_eventhub_transfer(connection_str): + import os + from azure.eventhub import EventHubClient, EventData + + client = EventHubClient.from_connection_string(connection_str) + sender = client.add_sender() + + try: + client.run() + # [START eventhub_client_transfer] + logger = logging.getLogger("azure.eventhub") + def callback(outcome, condition): + logger.info("Message sent. 
Outcome: {}, Condition: {}".format(
+                outcome, condition))
+
+        event_data = EventData(b"A single event")
+        sender.transfer(event_data, callback=callback)
+        sender.wait()
+        # [END eventhub_client_transfer]
+    except:
+        raise
+    finally:
+        client.stop()
+
+
+def test_example_eventhub_sync_sender_ops(live_eventhub_config, connection_str):
+    import os
+    # [START create_eventhub_client_sender_instance]
+    from azure.eventhub import EventHubClient
+
+    client = EventHubClient.from_connection_string(connection_str)
+    sender = client.add_sender(partition="0")
+    # [END create_eventhub_client_sender_instance]
+
+    # [START eventhub_client_sender_open]
+    client = EventHubClient.from_connection_string(connection_str)
+    sender = client.add_sender(partition="0")
+    try:
+        # Open the Sender using the supplied connection.
+        sender.open()
+        # Start sending
+    except:
+        raise
+    finally:
+        # Close down the send handler.
+        sender.close()
+    # [END eventhub_client_sender_open]
+
+    # [START eventhub_client_sender_close]
+    client = EventHubClient.from_connection_string(connection_str)
+    sender = client.add_sender(partition="0")
+    try:
+        # Open the Sender using the supplied connection.
+        sender.open()
+        # Start sending
+    except:
+        raise
+    finally:
+        # Close down the send handler.
+ sender.close() + # [END eventhub_client_sender_close] + + +def test_example_eventhub_sync_receiver_ops(live_eventhub_config, connection_str): + import os + # [START create_eventhub_client_receiver_instance] + from azure.eventhub import EventHubClient, Offset + + client = EventHubClient.from_connection_string(connection_str) + receiver = client.add_receiver(consumer_group="$default", partition="0", offset=Offset('@latest')) + # [END create_eventhub_client_receiver_instance] + + # [START eventhub_client_receiver_open] + client = EventHubClient.from_connection_string(connection_str) + receiver = client.add_receiver(consumer_group="$default", partition="0", offset=Offset('@latest')) + try: + # Open the Receiver using the supplied conneciton. + receiver.open() + # Start receiving + except: + raise + finally: + # Close down the receive handler. + receiver.close() + # [END eventhub_client_receiver_open] + + # [START eventhub_client_receiver_close] + client = EventHubClient.from_connection_string(connection_str) + receiver = client.add_receiver(consumer_group="$default", partition="0", offset=Offset('@latest')) + try: + # Open the Receiver using the supplied conneciton. + receiver.open() + # Start receiving + except: + raise + finally: + # Close down the receive handler. + receiver.close() + # [END eventhub_client_receiver_close] \ No newline at end of file diff --git a/azure-eventhubs/examples/transfer.py b/azure-eventhubs/examples/transfer.py new file mode 100644 index 000000000000..5190add90f5f --- /dev/null +++ b/azure-eventhubs/examples/transfer.py @@ -0,0 +1,59 @@ +#!/usr/bin/env python + +""" +An example to show sending events to an Event Hub. 
+""" + +# pylint: disable=C0111 + +import sys +import logging +import datetime +import time +import os + +from azure.eventhub import EventHubClient, Sender, EventData + +import examples +logger = examples.get_logger(logging.INFO) + + +# Address can be in either of these formats: +# "amqps://:@.servicebus.windows.net/myeventhub" +# "amqps://.servicebus.windows.net/myeventhub" +ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') + +# SAS policy and key are not required if they are encoded in the URL +USER = os.environ.get('EVENT_HUB_SAS_POLICY') +KEY = os.environ.get('EVENT_HUB_SAS_KEY') + + +def callback(outcome, condition): + logger.info("Message sent. Outcome: {}, Condition: {}".format( + outcome, condition)) + + +try: + if not ADDRESS: + raise ValueError("No EventHubs URL supplied.") + + client = EventHubClient(ADDRESS, debug=False, username=USER, password=KEY) + sender = client.add_sender(partition="1") + client.run() + try: + start_time = time.time() + for i in range(100): + sender.transfer(EventData(str(i)), callback=callback) + logger.info("Queued 100 messages.") + sender.wait() + logger.info("Finished processing queue.") + except: + raise + finally: + end_time = time.time() + client.stop() + run_time = end_time - start_time + logger.info("Runtime: {} seconds".format(run_time)) + +except KeyboardInterrupt: + pass diff --git a/azure-eventhubs/setup.cfg b/azure-eventhubs/setup.cfg new file mode 100644 index 000000000000..3480374bc2f2 --- /dev/null +++ b/azure-eventhubs/setup.cfg @@ -0,0 +1,2 @@ +[bdist_wheel] +universal=1 \ No newline at end of file diff --git a/azure-eventhubs/setup.py b/azure-eventhubs/setup.py new file mode 100644 index 000000000000..be35260e926a --- /dev/null +++ b/azure-eventhubs/setup.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python + +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +#-------------------------------------------------------------------------- + +import re +import os.path +from io import open +from setuptools import find_packages, setup + + +# Change the PACKAGE_NAME only to change folder and different name +PACKAGE_NAME = "azure-eventhub" +PACKAGE_PPRINT_NAME = "Event Hubs" + +# a-b-c => a/b/c +package_folder_path = PACKAGE_NAME.replace('-', '/') +# a-b-c => a.b.c +namespace_name = PACKAGE_NAME.replace('-', '.') + +# Version extraction inspired from 'requests' +with open(os.path.join(package_folder_path, '__init__.py'), 'r') as fd: + version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', + fd.read(), re.MULTILINE).group(1) + +if not version: + raise RuntimeError('Cannot find version information') + +with open('README.rst') as f: + readme = f.read() +with open('HISTORY.rst') as f: + history = f.read() + +setup( + name=PACKAGE_NAME, + version=version, + description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME), + long_description=readme + '\n\n' + history, + license='MIT License', + author='Microsoft Corporation', + author_email='azpysdkhelp@microsoft.com', + url='https://github.com/Azure/azure-sdk-for-python', + classifiers=[ + 'Development Status :: 5 - Production/Stable', + 'Programming Language :: Python', + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'License :: OSI Approved :: MIT License', + ], + zip_safe=False, + packages=find_packages(exclude=[ + "azure", + "examples", + "tests", + "tests.asynctests"]), + install_requires=[ + 'uamqp~=1.1.0', + 'msrestazure>=0.4.32,<2.0.0', + 'azure-common~=1.1', + 'azure-storage-blob~=1.3' + ] +) diff --git 
a/azure-eventhubs/tests/asynctests/__init__.py b/azure-eventhubs/tests/asynctests/__init__.py new file mode 100644 index 000000000000..dc00a0fcfcae --- /dev/null +++ b/azure-eventhubs/tests/asynctests/__init__.py @@ -0,0 +1,54 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +#-------------------------------------------------------------------------- + +import asyncio +import logging + +from azure.eventprocessorhost.abstract_event_processor import AbstractEventProcessor + + +class MockEventProcessor(AbstractEventProcessor): + """ + Mock Implmentation of AbstractEventProcessor for testing + """ + def __init__(self, params=None): + """ + Init Event processor + """ + self.params = params + self._msg_counter = 0 + + async def open_async(self, context): + """ + Called by processor host to initialize the event processor. + """ + logging.info("Connection established {}".format(context.partition_id)) + + async def close_async(self, context, reason): + """ + Called by processor host to indicate that the event processor is being stopped. + (Params) Context:Information about the partition + """ + logging.info("Connection closed (reason {}, id {}, offset {}, sq_number {})".format( + reason, context.partition_id, context.offset, context.sequence_number)) + + async def process_events_async(self, context, messages): + """ + Called by the processor host when a batch of events has arrived. + This is where the real work of the event processor is done. + (Params) Context: Information about the partition, Messages: The events to be processed. + """ + logging.info("Events processed {} {}".format(context.partition_id, messages)) + await context.checkpoint_async() + + async def process_error_async(self, context, error): + """ + Called when the underlying client experiences an error while receiving. 
+ EventProcessorHost will take care of recovering from the error and + continuing to pump messages,so no action is required from + (Params) Context: Information about the partition, Error: The error that occured. + """ + logging.error("Event Processor Error {!r}".format(error)) \ No newline at end of file diff --git a/azure-eventhubs/tests/asynctests/test_checkpoint_manager.py b/azure-eventhubs/tests/asynctests/test_checkpoint_manager.py new file mode 100644 index 000000000000..c54faacf312b --- /dev/null +++ b/azure-eventhubs/tests/asynctests/test_checkpoint_manager.py @@ -0,0 +1,128 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# ----------------------------------------------------------------------------------- + +import asyncio +import base64 +import pytest +import time +import json +from azure.common import AzureException + + +@pytest.mark.liveTest +def test_create_store(storage_clm): + """ + Test the store is created correctly if not exists + """ + loop = asyncio.get_event_loop() + loop.run_until_complete(storage_clm.create_checkpoint_store_if_not_exists_async()) + + +@pytest.mark.liveTest +def test_create_lease(storage_clm): + """ + Test lease creation + """ + + loop = asyncio.get_event_loop() + loop.run_until_complete(storage_clm.create_checkpoint_store_if_not_exists_async()) + loop.run_until_complete(storage_clm.create_lease_if_not_exists_async("1")) + + +@pytest.mark.liveTest +def test_get_lease(storage_clm): + """ + Test get lease + """ + loop = asyncio.get_event_loop() + loop.run_until_complete(storage_clm.get_lease_async("1")) + + +@pytest.mark.liveTest +def test_aquire_renew_release_lease(storage_clm): + """ + Test aquire lease + """ + loop = asyncio.get_event_loop() + lease = loop.run_until_complete(storage_clm.get_lease_async("1")) + assert 
lease is None + loop.run_until_complete(storage_clm.create_lease_if_not_exists_async("1")) + lease = loop.run_until_complete(storage_clm.get_lease_async("1")) + loop.run_until_complete(storage_clm.acquire_lease_async(lease)) + loop.run_until_complete(storage_clm.renew_lease_async(lease)) + loop.run_until_complete(storage_clm.release_lease_async(lease)) + assert lease.partition_id == "1" + assert lease.epoch == 1 + assert loop.run_until_complete(lease.state()) == "available" + + +@pytest.mark.liveTest +def test_delete_lease(storage_clm): + """ + Test delete lease + """ + loop = asyncio.get_event_loop() + lease = loop.run_until_complete(storage_clm.get_lease_async("1")) + assert lease is None + loop.run_until_complete(storage_clm.create_lease_if_not_exists_async("1")) + lease = loop.run_until_complete(storage_clm.get_lease_async("1")) + loop.run_until_complete(storage_clm.delete_lease_async(lease)) + lease = loop.run_until_complete(storage_clm.get_lease_async("1")) + assert lease == None + + +@pytest.mark.liveTest +def test_checkpointing(storage_clm): + """ + Test checkpointing + """ + loop = asyncio.get_event_loop() + local_checkpoint = loop.run_until_complete(storage_clm.create_checkpoint_if_not_exists_async("1")) + assert local_checkpoint.partition_id == "1" + assert local_checkpoint.offset == "-1" + lease = loop.run_until_complete(storage_clm.get_lease_async("1")) + loop.run_until_complete(storage_clm.acquire_lease_async(lease)) + + # Test EPH context encoded as bytes + event_processor_context = {'some_string_data': 'abc', 'some_int_data': 123, 'a_list': [42]} + cloud_event_processor_context_asbytes = json.dumps(event_processor_context).encode('utf-8') + lease.event_processor_context = base64.b64encode(cloud_event_processor_context_asbytes).decode('ascii') + loop.run_until_complete(storage_clm.update_checkpoint_async(lease, local_checkpoint)) + + cloud_lease = loop.run_until_complete(storage_clm.get_lease_async("1")) + cloud_event_processor_context_asbytes = 
cloud_lease.event_processor_context.encode('ascii') + event_processor_context_decoded = base64.b64decode(cloud_event_processor_context_asbytes).decode('utf-8') + cloud_event_processor_context = json.loads(event_processor_context_decoded) + assert cloud_event_processor_context['some_string_data'] == 'abc' + assert cloud_event_processor_context['some_int_data'] == 123 + assert cloud_event_processor_context['a_list'] == [42] + + # Test EPH context as JSON object + lease.event_processor_context = {'some_string_data': 'abc', 'some_int_data': 123, 'a_list': [42]} + loop.run_until_complete(storage_clm.update_checkpoint_async(lease, local_checkpoint)) + + cloud_lease = loop.run_until_complete(storage_clm.get_lease_async("1")) + assert cloud_lease.event_processor_context['some_string_data'] == 'abc' + assert cloud_lease.event_processor_context['some_int_data'] == 123 + assert cloud_lease.event_processor_context['a_list'] == [42] + + cloud_checkpoint = loop.run_until_complete(storage_clm.get_checkpoint_async("1")) + lease.offset = cloud_checkpoint.offset + lease.sequence_number = cloud_checkpoint.sequence_number + lease.event_processor_context = None + assert cloud_checkpoint.partition_id == "1" + assert cloud_checkpoint.offset == "-1" + modify_checkpoint = cloud_checkpoint + modify_checkpoint.offset = "512" + modify_checkpoint.sequence_number = "32" + time.sleep(35) + loop.run_until_complete(storage_clm.update_checkpoint_async(lease, modify_checkpoint)) + cloud_lease = loop.run_until_complete(storage_clm.get_lease_async("1")) + assert cloud_lease.event_processor_context is None + + cloud_checkpoint = loop.run_until_complete(storage_clm.get_checkpoint_async("1")) + assert cloud_checkpoint.partition_id == "1" + assert cloud_checkpoint.offset == "512" + loop.run_until_complete(storage_clm.release_lease_async(lease)) diff --git a/azure-eventhubs/tests/asynctests/test_eh_partition_pump.py b/azure-eventhubs/tests/asynctests/test_eh_partition_pump.py new file mode 100644 index 
000000000000..e21a7d675f6d
--- /dev/null
+++ b/azure-eventhubs/tests/asynctests/test_eh_partition_pump.py
@@ -0,0 +1,30 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# -----------------------------------------------------------------------------------
+
+import unittest
+import asyncio
+import logging
+import pytest
+
+
+async def wait_and_close(host):
+    """
+    Run EventProcessorHost for 60 seconds then shutdown.
+    """
+    await asyncio.sleep(60)
+    await host.close_async()
+
+
+@pytest.mark.liveTest
+def test_partition_pump_async(eh_partition_pump):
+    """
+    Test that event hub partition pump opens and processes messages successfully then closes
+    """
+    pytest.skip("Not working yet")
+    loop = asyncio.get_event_loop()
+    tasks = asyncio.gather(
+        eh_partition_pump.open_async(),
+        wait_and_close(eh_partition_pump))
+    loop.run_until_complete(tasks)
diff --git a/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py b/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py
new file mode 100644
index 000000000000..a8bc39757d87
--- /dev/null
+++ b/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py
@@ -0,0 +1,51 @@
+#-------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#-------------------------------------------------------------------------- + +import os +import asyncio +import pytest +import time + +from azure import eventhub +from azure.eventhub import EventData, Offset, EventHubError, EventHubClientAsync + + +async def pump(receiver, sleep=None): + messages = 0 + if sleep: + await asyncio.sleep(sleep) + batch = await receiver.receive(timeout=1) + messages += len(batch) + return messages + + +async def get_partitions(iot_connection_str): + try: + client = EventHubClientAsync.from_iothub_connection_string(iot_connection_str, debug=True) + client.add_async_receiver("$default", "0", prefetch=1000, operation='/messages/events') + await client.run_async() + partitions = await client.get_eventhub_info_async() + return partitions["partition_ids"] + finally: + await client.stop_async() + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_iothub_receive_multiple_async(iot_connection_str): + partitions = await get_partitions(iot_connection_str) + client = EventHubClientAsync.from_iothub_connection_string(iot_connection_str, debug=True) + try: + receivers = [] + for p in partitions: + receivers.append(client.add_async_receiver("$default", p, prefetch=10, operation='/messages/events')) + await client.run_async() + outputs = await asyncio.gather(*[pump(r) for r in receivers]) + + assert isinstance(outputs[0], int) and outputs[0] <= 10 + assert isinstance(outputs[1], int) and outputs[1] <= 10 + finally: + await client.stop_async() diff --git a/azure-eventhubs/tests/asynctests/test_longrunning_eph.py b/azure-eventhubs/tests/asynctests/test_longrunning_eph.py new file mode 100644 index 000000000000..9a51d067e312 --- /dev/null +++ b/azure-eventhubs/tests/asynctests/test_longrunning_eph.py @@ -0,0 +1,221 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for license information. +# ----------------------------------------------------------------------------------- + +import logging +import asyncio +import sys +import os +import argparse +import time +import json +import pytest +from logging.handlers import RotatingFileHandler + +from azure.eventhub import EventHubClientAsync, EventData +from azure.eventprocessorhost import ( + AbstractEventProcessor, + AzureStorageCheckpointLeaseManager, + EventHubConfig, + EventProcessorHost, + EPHOptions) + + +def get_logger(filename, level=logging.INFO): + azure_logger = logging.getLogger("azure.eventprocessorhost") + azure_logger.setLevel(level) + uamqp_logger = logging.getLogger("uamqp") + uamqp_logger.setLevel(logging.INFO) + + formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s') + console_handler = logging.StreamHandler(stream=sys.stdout) + console_handler.setFormatter(formatter) + if not azure_logger.handlers: + azure_logger.addHandler(console_handler) + if not uamqp_logger.handlers: + uamqp_logger.addHandler(console_handler) + + if filename: + file_handler = RotatingFileHandler(filename, maxBytes=20*1024*1024, backupCount=3) + file_handler.setFormatter(formatter) + azure_logger.addHandler(file_handler) + uamqp_logger.addHandler(file_handler) + + return azure_logger + +logger = get_logger("eph_test_async.log", logging.INFO) + + +class EventProcessor(AbstractEventProcessor): + """ + Example Implmentation of AbstractEventProcessor + """ + + def __init__(self, params=None): + """ + Init Event processor + """ + super().__init__(params) + self._msg_counter = 0 + + async def open_async(self, context): + """ + Called by processor host to initialize the event processor. + """ + assert hasattr(context, 'event_processor_context') + assert context.event_processor_context is None + logger.info("Connection established {}. 
State {}".format( + context.partition_id, context.event_processor_context)) + + async def close_async(self, context, reason): + """ + Called by processor host to indicate that the event processor is being stopped. + :param context: Information about the partition + :type context: ~azure.eventprocessorhost.PartitionContext + """ + logger.info("Connection closed (reason {}, id {}, offset {}, sq_number {}, state {})".format( + reason, + context.partition_id, + context.offset, + context.sequence_number, + context.event_processor_context)) + + async def process_events_async(self, context, messages): + """ + Called by the processor host when a batch of events has arrived. + This is where the real work of the event processor is done. + :param context: Information about the partition + :type context: ~azure.eventprocessorhost.PartitionContext + :param messages: The events to be processed. + :type messages: list[~azure.eventhub.common.EventData] + """ + assert context.event_processor_context is None + print("Processing id {}, offset {}, sq_number {}, state {})".format( + context.partition_id, + context.offset, + context.sequence_number, + context.event_processor_context)) + await context.checkpoint_async() + + async def process_error_async(self, context, error): + """ + Called when the underlying client experiences an error while receiving. + EventProcessorHost will take care of recovering from the error and + continuing to pump messages,so no action is required from + :param context: Information about the partition + :type context: ~azure.eventprocessorhost.PartitionContext + :param error: The error that occured. + """ + logger.info("Event Processor Error for partition {}, {!r}".format(context.partition_id, error)) + + +async def wait_and_close(host, duration): + """ + Run EventProcessorHost for 30 seconds then shutdown. 
+ """ + await asyncio.sleep(duration) + await host.close_async() + + +async def pump(pid, sender, duration): + deadline = time.time() + duration + total = 0 + + try: + while time.time() < deadline: + data = EventData(body=b"D" * 512) + sender.transfer(data) + total += 1 + if total % 100 == 0: + await sender.wait_async() + #logger.info("{}: Send total {}".format(pid, total)) + except Exception as err: + logger.error("{}: Send failed {}".format(pid, err)) + raise + print("{}: Final Sent total {}".format(pid, total)) + + +@pytest.mark.liveTest +def test_long_running_eph(live_eventhub): + parser = argparse.ArgumentParser() + parser.add_argument("--duration", help="Duration in seconds of the test", type=int, default=30) + parser.add_argument("--storage-account", help="Storage account name", default=os.environ.get('AZURE_STORAGE_ACCOUNT')) + parser.add_argument("--storage-key", help="Storage account access key", default=os.environ.get('AZURE_STORAGE_ACCESS_KEY')) + parser.add_argument("--container", help="Lease container name", default="nocontextleases") + parser.add_argument("--eventhub", help="Name of EventHub", default=live_eventhub['event_hub']) + parser.add_argument("--namespace", help="Namespace of EventHub", default=live_eventhub['namespace']) + parser.add_argument("--suffix", help="Namespace of EventHub", default="servicebus.windows.net") + parser.add_argument("--sas-policy", help="Name of the shared access policy to authenticate with", default=live_eventhub['key_name']) + parser.add_argument("--sas-key", help="Shared access key", default=live_eventhub['access_key']) + + loop = asyncio.get_event_loop() + args, _ = parser.parse_known_args() + if not args.namespace or not args.eventhub: + try: + import pytest + pytest.skip("Must specify '--namespace' and '--eventhub'") + except ImportError: + raise ValueError("Must specify '--namespace' and '--eventhub'") + + # Queue up some events in the Eventhub + conn_str = 
"Endpoint=sb://{}/;SharedAccessKeyName={};SharedAccessKey={};EntityPath={}".format( + live_eventhub['hostname'], + live_eventhub['key_name'], + live_eventhub['access_key'], + live_eventhub['event_hub']) + send_client = EventHubClientAsync.from_connection_string(conn_str) + pumps = [] + for pid in ["0", "1"]: + sender = send_client.add_async_sender(partition=pid, send_timeout=0, keep_alive=False) + pumps.append(pump(pid, sender, 15)) + loop.run_until_complete(send_client.run_async()) + results = loop.run_until_complete(asyncio.gather(*pumps, return_exceptions=True)) + loop.run_until_complete(send_client.stop_async()) + assert not any(results) + + # Eventhub config and storage manager + eh_config = EventHubConfig( + args.namespace, + args.eventhub, + args.sas_policy, + args.sas_key, + consumer_group="$default", + namespace_suffix=args.suffix) + eh_options = EPHOptions() + eh_options.release_pump_on_timeout = True + eh_options.debug_trace = False + eh_options.receive_timeout = 120 + storage_manager = AzureStorageCheckpointLeaseManager( + storage_account_name=args.storage_account, + storage_account_key=args.storage_key, + lease_renew_interval=30, + lease_container_name=args.container, + lease_duration=60) + + # Event loop and host + host = EventProcessorHost( + EventProcessor, + eh_config, + storage_manager, + ep_params=["param1","param2"], + eph_options=eh_options, + loop=loop) + + tasks = asyncio.gather( + host.open_async(), + wait_and_close(host, args.duration), return_exceptions=True) + results = loop.run_until_complete(tasks) + assert not any(results) + + +if __name__ == '__main__': + config = {} + config['hostname'] = os.environ['EVENT_HUB_HOSTNAME'] + config['event_hub'] = os.environ['EVENT_HUB_NAME'] + config['key_name'] = os.environ['EVENT_HUB_SAS_POLICY'] + config['access_key'] = os.environ['EVENT_HUB_SAS_KEY'] + config['namespace'] = os.environ['EVENT_HUB_NAMESPACE'] + config['consumer_group'] = "$Default" + config['partition'] = "0" + 
test_long_running_eph(config) diff --git a/azure-eventhubs/tests/asynctests/test_longrunning_eph_with_context.py b/azure-eventhubs/tests/asynctests/test_longrunning_eph_with_context.py new file mode 100644 index 000000000000..3c926dd77470 --- /dev/null +++ b/azure-eventhubs/tests/asynctests/test_longrunning_eph_with_context.py @@ -0,0 +1,226 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# ----------------------------------------------------------------------------------- + +import logging +import asyncio +import sys +import os +import argparse +import time +import json +import pytest +from logging.handlers import RotatingFileHandler + +from azure.eventhub import EventHubClientAsync, EventData +from azure.eventprocessorhost import ( + AbstractEventProcessor, + AzureStorageCheckpointLeaseManager, + EventHubConfig, + EventProcessorHost, + EPHOptions) + + +def get_logger(filename, level=logging.INFO): + azure_logger = logging.getLogger("azure.eventprocessorhost") + azure_logger.setLevel(level) + uamqp_logger = logging.getLogger("uamqp") + uamqp_logger.setLevel(logging.INFO) + + formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s') + console_handler = logging.StreamHandler(stream=sys.stdout) + console_handler.setFormatter(formatter) + if not azure_logger.handlers: + azure_logger.addHandler(console_handler) + if not uamqp_logger.handlers: + uamqp_logger.addHandler(console_handler) + + if filename: + file_handler = RotatingFileHandler(filename, maxBytes=20*1024*1024, backupCount=3) + file_handler.setFormatter(formatter) + azure_logger.addHandler(file_handler) + uamqp_logger.addHandler(file_handler) + + return azure_logger + +logger = get_logger("eph_test_async.log", logging.INFO) + + +class EventProcessor(AbstractEventProcessor): + 
""" + Example Implmentation of AbstractEventProcessor + """ + + def __init__(self, params=None): + """ + Init Event processor + """ + super().__init__(params) + self._params = params + self._msg_counter = 0 + + async def open_async(self, context): + """ + Called by processor host to initialize the event processor. + """ + assert hasattr(context, 'event_processor_context') + logger.info("Connection established {}. State {}".format( + context.partition_id, context.event_processor_context)) + + async def close_async(self, context, reason): + """ + Called by processor host to indicate that the event processor is being stopped. + :param context: Information about the partition + :type context: ~azure.eventprocessorhost.PartitionContext + """ + logger.info("Connection closed (reason {}, id {}, offset {}, sq_number {}, state {})".format( + reason, + context.partition_id, + context.offset, + context.sequence_number, + context.event_processor_context)) + + async def process_events_async(self, context, messages): + """ + Called by the processor host when a batch of events has arrived. + This is where the real work of the event processor is done. + :param context: Information about the partition + :type context: ~azure.eventprocessorhost.PartitionContext + :param messages: The events to be processed. 
+ :type messages: list[~azure.eventhub.common.EventData] + """ + print("Processing id {}, offset {}, sq_number {}, state {})".format( + context.partition_id, + context.offset, + context.sequence_number, + context.event_processor_context)) + assert hasattr(context, 'event_processor_context') + if self._msg_counter > 1: + assert context.event_processor_context == json.dumps( + {"Sequence": self._msg_counter, "Data": self._params}) + self._msg_counter += 1 + await context.checkpoint_async( + json.dumps({"Sequence": self._msg_counter, "Data": self._params})) + + async def process_error_async(self, context, error): + """ + Called when the underlying client experiences an error while receiving. + EventProcessorHost will take care of recovering from the error and + continuing to pump messages,so no action is required from + :param context: Information about the partition + :type context: ~azure.eventprocessorhost.PartitionContext + :param error: The error that occured. + """ + logger.info("Event Processor Error for partition {}, {!r}".format(context.partition_id, error)) + + +async def wait_and_close(host, duration): + """ + Run EventProcessorHost for 30 seconds then shutdown. 
+ """ + await asyncio.sleep(duration) + await host.close_async() + + +async def pump(pid, sender, duration): + deadline = time.time() + duration + total = 0 + + try: + while time.time() < deadline: + data = EventData(body=b"D" * 512) + sender.transfer(data) + total += 1 + if total % 100 == 0: + await sender.wait_async() + #logger.info("{}: Send total {}".format(pid, total)) + except Exception as err: + logger.error("{}: Send failed {}".format(pid, err)) + raise + print("{}: Final Sent total {}".format(pid, total)) + + +@pytest.mark.liveTest +def test_long_running_context_eph(live_eventhub): + parser = argparse.ArgumentParser() + parser.add_argument("--duration", help="Duration in seconds of the test", type=int, default=30) + parser.add_argument("--storage-account", help="Storage account name", default=os.environ.get('AZURE_STORAGE_ACCOUNT')) + parser.add_argument("--storage-key", help="Storage account access key", default=os.environ.get('AZURE_STORAGE_ACCESS_KEY')) + parser.add_argument("--container", help="Lease container name", default="contextleases") + parser.add_argument("--eventhub", help="Name of EventHub", default=live_eventhub['event_hub']) + parser.add_argument("--namespace", help="Namespace of EventHub", default=live_eventhub['namespace']) + parser.add_argument("--suffix", help="Namespace of EventHub", default="servicebus.windows.net") + parser.add_argument("--sas-policy", help="Name of the shared access policy to authenticate with", default=live_eventhub['key_name']) + parser.add_argument("--sas-key", help="Shared access key", default=live_eventhub['access_key']) + + loop = asyncio.get_event_loop() + args, _ = parser.parse_known_args() + if not args.namespace or not args.eventhub: + try: + import pytest + pytest.skip("Must specify '--namespace' and '--eventhub'") + except ImportError: + raise ValueError("Must specify '--namespace' and '--eventhub'") + + # Queue up some events in the Eventhub + conn_str = 
"Endpoint=sb://{}/;SharedAccessKeyName={};SharedAccessKey={};EntityPath={}".format( + live_eventhub['hostname'], + live_eventhub['key_name'], + live_eventhub['access_key'], + live_eventhub['event_hub']) + send_client = EventHubClientAsync.from_connection_string(conn_str) + pumps = [] + for pid in ["0", "1"]: + sender = send_client.add_async_sender(partition=pid, send_timeout=0, keep_alive=False) + pumps.append(pump(pid, sender, 15)) + loop.run_until_complete(send_client.run_async()) + results = loop.run_until_complete(asyncio.gather(*pumps, return_exceptions=True)) + loop.run_until_complete(send_client.stop_async()) + assert not any(results) + + # Eventhub config and storage manager + eh_config = EventHubConfig( + args.namespace, + args.eventhub, + args.sas_policy, + args.sas_key, + consumer_group="$default", + namespace_suffix=args.suffix) + eh_options = EPHOptions() + eh_options.release_pump_on_timeout = True + eh_options.debug_trace = False + eh_options.receive_timeout = 120 + storage_manager = AzureStorageCheckpointLeaseManager( + storage_account_name=args.storage_account, + storage_account_key=args.storage_key, + lease_renew_interval=30, + lease_container_name=args.container, + lease_duration=60) + + # Event loop and host + host = EventProcessorHost( + EventProcessor, + eh_config, + storage_manager, + ep_params=["param1","param2"], + eph_options=eh_options, + loop=loop) + + tasks = asyncio.gather( + host.open_async(), + wait_and_close(host, args.duration), return_exceptions=True) + results = loop.run_until_complete(tasks) + assert not any(results) + + +if __name__ == '__main__': + config = {} + config['hostname'] = os.environ['EVENT_HUB_HOSTNAME'] + config['event_hub'] = os.environ['EVENT_HUB_NAME'] + config['key_name'] = os.environ['EVENT_HUB_SAS_POLICY'] + config['access_key'] = os.environ['EVENT_HUB_SAS_KEY'] + config['namespace'] = os.environ['EVENT_HUB_NAMESPACE'] + config['consumer_group'] = "$Default" + config['partition'] = "0" + 
test_long_running_context_eph(config)
_args, _dl): + total = 0 + iteration = 0 + deadline = time.time() + _dl + try: + while time.time() < deadline: + batch = await receiver.receive(timeout=1) + size = len(batch) + total += size + iteration += 1 + if size == 0: + print("{}: No events received, queue size {}, delivered {}".format( + _pid, + receiver.queue_size, + total)) + elif iteration >= 5: + iteration = 0 + print("{}: total received {}, last sn={}, last offset={}".format( + _pid, + total, + batch[-1].sequence_number, + batch[-1].offset.value)) + print("{}: total received {}".format( + _pid, + total)) + except Exception as e: + print("Partition {} receiver failed: {}".format(_pid, e)) + raise + + +@pytest.mark.liveTest +def test_long_running_receive_async(connection_str): + parser = argparse.ArgumentParser() + parser.add_argument("--duration", help="Duration in seconds of the test", type=int, default=30) + parser.add_argument("--consumer", help="Consumer group name", default="$default") + parser.add_argument("--partitions", help="Comma seperated partition IDs") + parser.add_argument("--offset", help="Starting offset", default="-1") + parser.add_argument("--conn-str", help="EventHub connection string", default=connection_str) + parser.add_argument("--eventhub", help="Name of EventHub") + parser.add_argument("--address", help="Address URI to the EventHub entity") + parser.add_argument("--sas-policy", help="Name of the shared access policy to authenticate with") + parser.add_argument("--sas-key", help="Shared access key") + + loop = asyncio.get_event_loop() + args, _ = parser.parse_known_args() + if args.conn_str: + client = EventHubClientAsync.from_connection_string( + args.conn_str, + eventhub=args.eventhub, auth_timeout=240, debug=False) + elif args.address: + client = EventHubClientAsync( + args.address, + auth_timeout=240, + username=args.sas_policy, + password=args.sas_key) + else: + try: + import pytest + pytest.skip("Must specify either '--conn-str' or '--address'") + except ImportError: + raise 
ValueError("Must specify either '--conn-str' or '--address'") + + try: + if not args.partitions: + partitions = loop.run_until_complete(get_partitions(client)) + else: + partitions = args.partitions.split(",") + pumps = [] + for pid in partitions: + receiver = client.add_async_receiver( + consumer_group=args.consumer, + partition=pid, + offset=Offset(args.offset), + prefetch=50) + pumps.append(pump(pid, receiver, args, args.duration)) + loop.run_until_complete(client.run_async()) + loop.run_until_complete(asyncio.gather(*pumps)) + finally: + loop.run_until_complete(client.stop_async()) + + +if __name__ == '__main__': + test_long_running_receive_async(os.environ.get('EVENT_HUB_CONNECTION_STR')) diff --git a/azure-eventhubs/tests/asynctests/test_longrunning_send_async.py b/azure-eventhubs/tests/asynctests/test_longrunning_send_async.py new file mode 100644 index 000000000000..56832f87a87d --- /dev/null +++ b/azure-eventhubs/tests/asynctests/test_longrunning_send_async.py @@ -0,0 +1,141 @@ +#!/usr/bin/env python + +""" +send test +""" + +import logging +import argparse +import time +import os +import asyncio +import sys +import pytest +from logging.handlers import RotatingFileHandler + +from azure.eventhub import EventHubClientAsync, EventData + + +def get_logger(filename, level=logging.INFO): + azure_logger = logging.getLogger("azure.eventhub") + azure_logger.setLevel(level) + uamqp_logger = logging.getLogger("uamqp") + uamqp_logger.setLevel(logging.INFO) + + formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s') + console_handler = logging.StreamHandler(stream=sys.stdout) + console_handler.setFormatter(formatter) + if not azure_logger.handlers: + azure_logger.addHandler(console_handler) + if not uamqp_logger.handlers: + uamqp_logger.addHandler(console_handler) + + if filename: + file_handler = RotatingFileHandler(filename, maxBytes=20*1024*1024, backupCount=3) + file_handler.setFormatter(formatter) + 
azure_logger.addHandler(file_handler) + uamqp_logger.addHandler(file_handler) + + return azure_logger + +logger = get_logger("send_test_async.log", logging.INFO) + + +def check_send_successful(outcome, condition): + if outcome.value != 0: + print("Send failed {}".format(condition)) + + +async def get_partitions(args): + eh_data = await args.get_eventhub_info_async() + return eh_data["partition_ids"] + + +async def pump(pid, sender, args, duration): + deadline = time.time() + duration + total = 0 + + def data_generator(): + for i in range(args.batch): + yield b"D" * args.payload + + if args.batch > 1: + logger.info("{}: Sending batched messages".format(pid)) + else: + logger.info("{}: Sending single messages".format(pid)) + + try: + while time.time() < deadline: + if args.batch > 1: + data = EventData(batch=data_generator()) + else: + data = EventData(body=b"D" * args.payload) + sender.transfer(data, callback=check_send_successful) + total += args.batch + if total % 100 == 0: + await sender.wait_async() + logger.info("{}: Send total {}".format(pid, total)) + except Exception as err: + logger.error("{}: Send failed {}".format(pid, err)) + raise + print("{}: Final Sent total {}".format(pid, total)) + + +@pytest.mark.liveTest +def test_long_running_partition_send_async(connection_str): + parser = argparse.ArgumentParser() + parser.add_argument("--duration", help="Duration in seconds of the test", type=int, default=30) + parser.add_argument("--payload", help="payload size", type=int, default=1024) + parser.add_argument("--batch", help="Number of events to send and wait", type=int, default=200) + parser.add_argument("--partitions", help="Comma seperated partition IDs") + parser.add_argument("--conn-str", help="EventHub connection string", default=connection_str) + parser.add_argument("--eventhub", help="Name of EventHub") + parser.add_argument("--address", help="Address URI to the EventHub entity") + parser.add_argument("--sas-policy", help="Name of the shared access 
policy to authenticate with") + parser.add_argument("--sas-key", help="Shared access key") + parser.add_argument("--logger-name", help="Unique log file ID") + + loop = asyncio.get_event_loop() + args, _ = parser.parse_known_args() + + if args.conn_str: + client = EventHubClientAsync.from_connection_string( + args.conn_str, + eventhub=args.eventhub, debug=True) + elif args.address: + client = EventHubClientAsync( + args.address, + username=args.sas_policy, + password=args.sas_key, + auth_timeout=500) + else: + try: + import pytest + pytest.skip("Must specify either '--conn-str' or '--address'") + except ImportError: + raise ValueError("Must specify either '--conn-str' or '--address'") + + try: + if not args.partitions: + partitions = loop.run_until_complete(get_partitions(client)) + else: + pid_range = args.partitions.split("-") + if len(pid_range) > 1: + partitions = [str(i) for i in range(int(pid_range[0]), int(pid_range[1]) + 1)] + else: + partitions = args.partitions.split(",") + pumps = [] + for pid in partitions: + sender = client.add_async_sender(partition=pid, send_timeout=0, keep_alive=False) + pumps.append(pump(pid, sender, args, args.duration)) + loop.run_until_complete(client.run_async()) + results = loop.run_until_complete(asyncio.gather(*pumps, return_exceptions=True)) + assert not results + except Exception as e: + logger.error("Sender failed: {}".format(e)) + finally: + logger.info("Shutting down sender") + loop.run_until_complete(client.stop_async()) + +if __name__ == '__main__': + test_long_running_partition_send_async(os.environ.get('EVENT_HUB_CONNECTION_STR')) diff --git a/azure-eventhubs/tests/asynctests/test_negative_async.py b/azure-eventhubs/tests/asynctests/test_negative_async.py new file mode 100644 index 000000000000..4b2e8b0a367b --- /dev/null +++ b/azure-eventhubs/tests/asynctests/test_negative_async.py @@ -0,0 +1,204 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +#-------------------------------------------------------------------------- + +import os +import asyncio +import pytest +import time +import sys + +from azure import eventhub +from azure.eventhub import ( + EventHubClientAsync, + EventData, + Offset, + EventHubError) + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_send_with_invalid_hostname_async(invalid_hostname, connstr_receivers): + _, receivers = connstr_receivers + client = EventHubClientAsync.from_connection_string(invalid_hostname, debug=True) + sender = client.add_async_sender() + with pytest.raises(EventHubError): + await client.run_async() + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_receive_with_invalid_hostname_async(invalid_hostname): + client = EventHubClientAsync.from_connection_string(invalid_hostname, debug=True) + sender = client.add_async_receiver("$default", "0") + with pytest.raises(EventHubError): + await client.run_async() + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_send_with_invalid_key_async(invalid_key, connstr_receivers): + _, receivers = connstr_receivers + client = EventHubClientAsync.from_connection_string(invalid_key, debug=False) + sender = client.add_async_sender() + with pytest.raises(EventHubError): + await client.run_async() + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_receive_with_invalid_key_async(invalid_key): + client = EventHubClientAsync.from_connection_string(invalid_key, debug=True) + sender = client.add_async_receiver("$default", "0") + with pytest.raises(EventHubError): + await client.run_async() + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_send_with_invalid_policy_async(invalid_policy, connstr_receivers): + _, receivers = connstr_receivers + client = EventHubClientAsync.from_connection_string(invalid_policy, debug=False) + sender = 
client.add_async_sender() + with pytest.raises(EventHubError): + await client.run_async() + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_receive_with_invalid_policy_async(invalid_policy): + client = EventHubClientAsync.from_connection_string(invalid_policy, debug=True) + sender = client.add_async_receiver("$default", "0") + with pytest.raises(EventHubError): + await client.run_async() + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_send_partition_key_with_partition_async(connection_str): + client = EventHubClientAsync.from_connection_string(connection_str, debug=True) + sender = client.add_async_sender(partition="1") + try: + await client.run_async() + data = EventData(b"Data") + data.partition_key = b"PKey" + with pytest.raises(ValueError): + await sender.send(data) + finally: + await client.stop_async() + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_non_existing_entity_sender_async(connection_str): + client = EventHubClientAsync.from_connection_string(connection_str, eventhub="nemo", debug=False) + sender = client.add_async_sender(partition="1") + with pytest.raises(EventHubError): + await client.run_async() + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_non_existing_entity_receiver_async(connection_str): + client = EventHubClientAsync.from_connection_string(connection_str, eventhub="nemo", debug=False) + receiver = client.add_async_receiver("$default", "0") + with pytest.raises(EventHubError): + await client.run_async() + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_receive_from_invalid_partitions_async(connection_str): + partitions = ["XYZ", "-1", "1000", "-" ] + for p in partitions: + client = EventHubClientAsync.from_connection_string(connection_str, debug=True) + receiver = client.add_async_receiver("$default", p) + try: + with pytest.raises(EventHubError): + await client.run_async() + await receiver.receive(timeout=10) + finally: + await client.stop_async() + + 
+@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_send_to_invalid_partitions_async(connection_str): + partitions = ["XYZ", "-1", "1000", "-" ] + for p in partitions: + client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + sender = client.add_async_sender(partition=p) + try: + with pytest.raises(EventHubError): + await client.run_async() + finally: + await client.stop_async() + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_send_too_large_message_async(connection_str): + if sys.platform.startswith('darwin'): + pytest.skip("Skipping on OSX - open issue regarding message size") + client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + sender = client.add_async_sender() + try: + await client.run_async() + data = EventData(b"A" * 300000) + with pytest.raises(EventHubError): + await sender.send(data) + finally: + await client.stop_async() + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_send_null_body_async(connection_str): + client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + sender = client.add_async_sender() + try: + await client.run_async() + with pytest.raises(ValueError): + data = EventData(None) + await sender.send(data) + finally: + await client.stop_async() + + +async def pump(receiver): + messages = 0 + count = 0 + batch = await receiver.receive(timeout=10) + while batch and count <= 5: + count += 1 + messages += len(batch) + batch = await receiver.receive(timeout=10) + return messages + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_max_receivers_async(connstr_senders): + connection_str, senders = connstr_senders + client = EventHubClientAsync.from_connection_string(connection_str, debug=True) + receivers = [] + for i in range(6): + receivers.append(client.add_async_receiver("$default", "0", prefetch=1000, offset=Offset('@latest'))) + try: + await client.run_async() + outputs = await asyncio.gather( + 
pump(receivers[0]), + pump(receivers[1]), + pump(receivers[2]), + pump(receivers[3]), + pump(receivers[4]), + pump(receivers[5]), + return_exceptions=True) + print(outputs) + failed = [o for o in outputs if isinstance(o, EventHubError)] + assert len(failed) == 1 + print(failed[0].message) + finally: + await client.stop_async() diff --git a/azure-eventhubs/tests/asynctests/test_partition_manager.py b/azure-eventhubs/tests/asynctests/test_partition_manager.py new file mode 100644 index 000000000000..aab0a1b4e501 --- /dev/null +++ b/azure-eventhubs/tests/asynctests/test_partition_manager.py @@ -0,0 +1,17 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# ----------------------------------------------------------------------------------- + +import asyncio +import pytest + + +@pytest.mark.liveTest +def test_get_partition_ids(partition_manager): + """ + Test that partition manger returns all the partitions for an event hub + """ + loop = asyncio.get_event_loop() + pids = loop.run_until_complete(partition_manager.get_partition_ids_async()) + assert pids == ["0", "1"] diff --git a/azure-eventhubs/tests/asynctests/test_partition_pump.py b/azure-eventhubs/tests/asynctests/test_partition_pump.py new file mode 100644 index 000000000000..88c3eea4cb7e --- /dev/null +++ b/azure-eventhubs/tests/asynctests/test_partition_pump.py @@ -0,0 +1,40 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# ----------------------------------------------------------------------------------- + +import asyncio +import pytest + + +@pytest.mark.liveTest +def test_open_async(partition_pump): + """ + Test that partition pump opens sucessfully + """ + loop = asyncio.get_event_loop() + loop.run_until_complete(partition_pump.open_async()) # Simulate Open + + +@pytest.mark.liveTest +def test_process_events_async(partition_pump): + """ + Test that the partition pump processes a list of mock events (["event1", "event2"]) + properly + """ + loop = asyncio.get_event_loop() + loop.run_until_complete(partition_pump.open_async()) # Simulate Open + _mock_events = ["event1", "event2"] # Mock Events + loop.run_until_complete(partition_pump.process_events_async(_mock_events)) # Simulate Process + + +@pytest.mark.liveTest +def test_close_async(partition_pump): + """ + Test that partition pump closes + """ + loop = asyncio.get_event_loop() + loop.run_until_complete(partition_pump.open_async()) # Simulate Open + _mock_events = ["event1", "event2"] # Mock Events + loop.run_until_complete(partition_pump.process_events_async(_mock_events)) # Simulate Process + loop.run_until_complete(partition_pump.close_async("Finished")) # Simulate Close diff --git a/azure-eventhubs/tests/asynctests/test_receive_async.py b/azure-eventhubs/tests/asynctests/test_receive_async.py new file mode 100644 index 000000000000..6b086ff8202c --- /dev/null +++ b/azure-eventhubs/tests/asynctests/test_receive_async.py @@ -0,0 +1,350 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+#-------------------------------------------------------------------------- + +import os +import asyncio +import pytest +import time + +from azure import eventhub +from azure.eventhub import EventData, Offset, EventHubError, EventHubClientAsync + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_receive_end_of_stream_async(connstr_senders): + connection_str, senders = connstr_senders + client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + receiver = client.add_async_receiver("$default", "0", offset=Offset('@latest')) + await client.run_async() + try: + received = await receiver.receive(timeout=5) + assert len(received) == 0 + senders[0].send(EventData(b"Receiving only a single event")) + received = await receiver.receive(timeout=5) + assert len(received) == 1 + + assert list(received[-1].body)[0] == b"Receiving only a single event" + except: + raise + finally: + await client.stop_async() + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_receive_with_offset_async(connstr_senders): + connection_str, senders = connstr_senders + client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + receiver = client.add_async_receiver("$default", "0", offset=Offset('@latest')) + await client.run_async() + try: + received = await receiver.receive(timeout=5) + assert len(received) == 0 + senders[0].send(EventData(b"Data")) + time.sleep(1) + received = await receiver.receive(timeout=3) + assert len(received) == 1 + offset = received[0].offset + + offset_receiver = client.add_async_receiver("$default", "0", offset=offset) + await client.run_async() + received = await offset_receiver.receive(timeout=5) + assert len(received) == 0 + senders[0].send(EventData(b"Message after offset")) + received = await offset_receiver.receive(timeout=5) + assert len(received) == 1 + except: + raise + finally: + await client.stop_async() + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def 
test_receive_with_inclusive_offset_async(connstr_senders): + connection_str, senders = connstr_senders + client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + receiver = client.add_async_receiver("$default", "0", offset=Offset('@latest')) + await client.run_async() + try: + received = await receiver.receive(timeout=5) + assert len(received) == 0 + senders[0].send(EventData(b"Data")) + time.sleep(1) + received = await receiver.receive(timeout=5) + assert len(received) == 1 + offset = received[0].offset + + offset_receiver = client.add_async_receiver("$default", "0", offset=Offset(offset.value, inclusive=True)) + await client.run_async() + received = await offset_receiver.receive(timeout=5) + assert len(received) == 1 + except: + raise + finally: + await client.stop_async() + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_receive_with_datetime_async(connstr_senders): + connection_str, senders = connstr_senders + client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + receiver = client.add_async_receiver("$default", "0", offset=Offset('@latest')) + await client.run_async() + try: + received = await receiver.receive(timeout=5) + assert len(received) == 0 + senders[0].send(EventData(b"Data")) + received = await receiver.receive(timeout=5) + assert len(received) == 1 + offset = received[0].enqueued_time + + offset_receiver = client.add_async_receiver("$default", "0", offset=Offset(offset)) + await client.run_async() + received = await offset_receiver.receive(timeout=5) + assert len(received) == 0 + senders[0].send(EventData(b"Message after timestamp")) + time.sleep(1) + received = await offset_receiver.receive(timeout=5) + assert len(received) == 1 + except: + raise + finally: + await client.stop_async() + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_receive_with_sequence_no_async(connstr_senders): + connection_str, senders = connstr_senders + client = 
EventHubClientAsync.from_connection_string(connection_str, debug=False) + receiver = client.add_async_receiver("$default", "0", offset=Offset('@latest')) + await client.run_async() + try: + received = await receiver.receive(timeout=5) + assert len(received) == 0 + senders[0].send(EventData(b"Data")) + received = await receiver.receive(timeout=5) + assert len(received) == 1 + offset = received[0].sequence_number + + offset_receiver = client.add_async_receiver("$default", "0", offset=Offset(offset)) + await client.run_async() + received = await offset_receiver.receive(timeout=5) + assert len(received) == 0 + senders[0].send(EventData(b"Message next in sequence")) + time.sleep(1) + received = await offset_receiver.receive(timeout=5) + assert len(received) == 1 + except: + raise + finally: + await client.stop_async() + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_receive_with_inclusive_sequence_no_async(connstr_senders): + connection_str, senders = connstr_senders + client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + receiver = client.add_async_receiver("$default", "0", offset=Offset('@latest')) + await client.run_async() + try: + received = await receiver.receive(timeout=5) + assert len(received) == 0 + senders[0].send(EventData(b"Data")) + received = await receiver.receive(timeout=5) + assert len(received) == 1 + offset = received[0].sequence_number + + offset_receiver = client.add_async_receiver("$default", "0", offset=Offset(offset, inclusive=True)) + await client.run_async() + received = await offset_receiver.receive(timeout=5) + assert len(received) == 1 + except: + raise + finally: + await client.stop_async() + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_receive_batch_async(connstr_senders): + connection_str, senders = connstr_senders + client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + receiver = client.add_async_receiver("$default", "0", prefetch=500, 
offset=Offset('@latest')) + await client.run_async() + try: + received = await receiver.receive(timeout=5) + assert len(received) == 0 + for i in range(10): + senders[0].send(EventData(b"Data")) + received = await receiver.receive(max_batch_size=5, timeout=5) + assert len(received) == 5 + except: + raise + finally: + await client.stop_async() + + +async def pump(receiver, sleep=None): + messages = 0 + count = 0 + if sleep: + await asyncio.sleep(sleep) + batch = await receiver.receive(timeout=10) + while batch: + count += 1 + if count >= 10: + break + messages += len(batch) + batch = await receiver.receive(timeout=10) + return messages + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_epoch_receiver_async(connstr_senders): + connection_str, senders = connstr_senders + senders[0].send(EventData(b"Receiving only a single event")) + + client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + receivers = [] + for epoch in [10, 20]: + receivers.append(client.add_async_epoch_receiver("$default", "0", epoch, prefetch=5)) + try: + await client.run_async() + outputs = await asyncio.gather( + pump(receivers[0]), + pump(receivers[1]), + return_exceptions=True) + assert isinstance(outputs[0], EventHubError) + assert outputs[1] == 1 + except: + raise + finally: + await client.stop_async() + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_multiple_receiver_async(connstr_senders): + connection_str, senders = connstr_senders + senders[0].send(EventData(b"Receiving only a single event")) + + client = EventHubClientAsync.from_connection_string(connection_str, debug=True) + partitions = await client.get_eventhub_info_async() + assert partitions["partition_ids"] == ["0", "1"] + receivers = [] + for i in range(2): + receivers.append(client.add_async_receiver("$default", "0", prefetch=10)) + try: + await client.run_async() + more_partitions = await client.get_eventhub_info_async() + assert more_partitions["partition_ids"] == ["0", 
"1"] + outputs = await asyncio.gather( + pump(receivers[0]), + pump(receivers[1]), + return_exceptions=True) + assert isinstance(outputs[0], int) and outputs[0] == 1 + assert isinstance(outputs[1], int) and outputs[1] == 1 + except: + raise + finally: + await client.stop_async() + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_epoch_receiver_after_non_epoch_receiver_async(connstr_senders): + connection_str, senders = connstr_senders + senders[0].send(EventData(b"Receiving only a single event")) + + client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + receivers = [] + receivers.append(client.add_async_receiver("$default", "0", prefetch=10)) + receivers.append(client.add_async_epoch_receiver("$default", "0", 15, prefetch=10)) + try: + await client.run_async() + outputs = await asyncio.gather( + pump(receivers[0]), + pump(receivers[1], sleep=5), + return_exceptions=True) + assert isinstance(outputs[0], EventHubError) + assert isinstance(outputs[1], int) and outputs[1] == 1 + except: + raise + finally: + await client.stop_async() + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_non_epoch_receiver_after_epoch_receiver_async(connstr_senders): + connection_str, senders = connstr_senders + senders[0].send(EventData(b"Receiving only a single event")) + + client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + receivers = [] + receivers.append(client.add_async_epoch_receiver("$default", "0", 15, prefetch=10)) + receivers.append(client.add_async_receiver("$default", "0", prefetch=10)) + try: + await client.run_async() + outputs = await asyncio.gather( + pump(receivers[0]), + pump(receivers[1]), + return_exceptions=True) + assert isinstance(outputs[1], EventHubError) + assert isinstance(outputs[0], int) and outputs[0] == 1 + except: + raise + finally: + await client.stop_async() + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def 
test_receive_batch_with_app_prop_async(connstr_senders): + pytest.skip("Waiting on uAMQP release") + connection_str, senders = connstr_senders + + def batched(): + for i in range(10): + yield "Event Data {}".format(i) + for i in range(10, 20): + yield EventData("Event Data {}".format(i)) + + client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + receiver = client.add_async_receiver("$default", "0", prefetch=500, offset=Offset('@latest')) + try: + await client.run_async() + + received = await receiver.receive(timeout=5) + assert len(received) == 0 + + app_prop_key = "raw_prop" + app_prop_value = "raw_value" + batch_app_prop = {app_prop_key:app_prop_value} + batch_event = EventData(batch=batched()) + batch_event.application_properties = batch_app_prop + + senders[0].send(batch_event) + + await asyncio.sleep(1) + + received = await receiver.receive(max_batch_size=15, timeout=5) + assert len(received) == 15 + + for index, message in enumerate(received): + assert list(message.body)[0] == "Event Data {}".format(index).encode('utf-8') + assert (app_prop_key.encode('utf-8') in message.application_properties) \ + and (dict(message.application_properties)[app_prop_key.encode('utf-8')] == app_prop_value.encode('utf-8')) + except: + raise + finally: + await client.stop_async() diff --git a/azure-eventhubs/tests/asynctests/test_reconnect_async.py b/azure-eventhubs/tests/asynctests/test_reconnect_async.py new file mode 100644 index 000000000000..9fafc0dc0069 --- /dev/null +++ b/azure-eventhubs/tests/asynctests/test_reconnect_async.py @@ -0,0 +1,94 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+#-------------------------------------------------------------------------- + +import os +import time +import asyncio +import pytest + +from azure import eventhub +from azure.eventhub import ( + EventHubClientAsync, + EventData, + Offset, + EventHubError) + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_send_with_long_interval_async(connstr_receivers): + connection_str, receivers = connstr_receivers + client = EventHubClientAsync.from_connection_string(connection_str, debug=True) + sender = client.add_async_sender() + try: + await client.run_async() + await sender.send(EventData(b"A single event")) + for _ in range(2): + await asyncio.sleep(300) + await sender.send(EventData(b"A single event")) + finally: + await client.stop_async() + + received = [] + for r in receivers: + received.extend(r.receive(timeout=1)) + assert len(received) == 3 + assert list(received[0].body)[0] == b"A single event" + + +def pump(receiver): + messages = [] + batch = receiver.receive(timeout=1) + messages.extend(batch) + while batch: + batch = receiver.receive(timeout=1) + messages.extend(batch) + return messages + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_send_with_forced_conn_close_async(connstr_receivers): + connection_str, receivers = connstr_receivers + client = EventHubClientAsync.from_connection_string(connection_str, debug=True) + sender = client.add_async_sender() + try: + await client.run_async() + await sender.send(EventData(b"A single event")) + sender._handler._message_sender.destroy() + await asyncio.sleep(300) + await sender.send(EventData(b"A single event")) + await sender.send(EventData(b"A single event")) + sender._handler._message_sender.destroy() + await asyncio.sleep(300) + await sender.send(EventData(b"A single event")) + await sender.send(EventData(b"A single event")) + finally: + await client.stop_async() + + received = [] + for r in receivers: + received.extend(pump(r)) + assert len(received) == 5 + assert 
list(received[0].body)[0] == b"A single event" + + +# def test_send_with_forced_link_detach(connstr_receivers): +# connection_str, receivers = connstr_receivers +# client = EventHubClient.from_connection_string(connection_str, debug=True) +# sender = client.add_sender() +# size = 20 * 1024 +# try: +# client.run() +# for i in range(1000): +# sender.transfer(EventData([b"A"*size, b"B"*size, b"C"*size, b"D"*size, b"A"*size, b"B"*size, b"C"*size, b"D"*size, b"A"*size, b"B"*size, b"C"*size, b"D"*size])) +# sender.wait() +# finally: +# client.stop() + +# received = [] +# for r in receivers: +# received.extend(r.receive(timeout=10)) diff --git a/azure-eventhubs/tests/asynctests/test_send_async.py b/azure-eventhubs/tests/asynctests/test_send_async.py new file mode 100644 index 000000000000..917d7cde3b63 --- /dev/null +++ b/azure-eventhubs/tests/asynctests/test_send_async.py @@ -0,0 +1,266 @@ +# -- coding: utf-8 -- +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+#-------------------------------------------------------------------------- + +import os +import asyncio +import pytest +import time +import json + +from azure.eventhub import EventData, EventHubClientAsync + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_send_with_partition_key_async(connstr_receivers): + connection_str, receivers = connstr_receivers + client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + sender = client.add_async_sender() + await client.run_async() + + data_val = 0 + for partition in [b"a", b"b", b"c", b"d", b"e", b"f"]: + partition_key = b"test_partition_" + partition + for i in range(50): + data = EventData(str(data_val)) + data.partition_key = partition_key + data_val += 1 + await sender.send(data) + await client.stop_async() + + found_partition_keys = {} + for index, partition in enumerate(receivers): + received = partition.receive(timeout=5) + for message in received: + try: + existing = found_partition_keys[message.partition_key] + assert existing == index + except KeyError: + found_partition_keys[message.partition_key] = index + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_send_and_receive_zero_length_body_async(connstr_receivers): + connection_str, receivers = connstr_receivers + client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + sender = client.add_async_sender() + try: + await client.run_async() + await sender.send(EventData("")) + except: + raise + finally: + await client.stop_async() + + received = [] + for r in receivers: + received.extend(r.receive(timeout=1)) + + assert len(received) == 1 + assert list(received[0].body)[0] == b"" + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_send_single_event_async(connstr_receivers): + connection_str, receivers = connstr_receivers + client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + sender = client.add_async_sender() + try: + await client.run_async() + 
await sender.send(EventData(b"A single event")) + except: + raise + finally: + await client.stop_async() + + received = [] + for r in receivers: + received.extend(r.receive(timeout=1)) + + assert len(received) == 1 + assert list(received[0].body)[0] == b"A single event" + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_send_batch_async(connstr_receivers): + connection_str, receivers = connstr_receivers + def batched(): + for i in range(10): + yield "Event number {}".format(i) + + client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + sender = client.add_async_sender() + try: + await client.run_async() + await sender.send(EventData(batch=batched())) + except: + raise + finally: + await client.stop_async() + + time.sleep(1) + received = [] + for r in receivers: + received.extend(r.receive(timeout=3)) + + assert len(received) == 10 + for index, message in enumerate(received): + assert list(message.body)[0] == "Event number {}".format(index).encode('utf-8') + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_send_partition_async(connstr_receivers): + connection_str, receivers = connstr_receivers + client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + sender = client.add_async_sender(partition="1") + try: + await client.run_async() + await sender.send(EventData(b"Data")) + except: + raise + finally: + await client.stop_async() + + partition_0 = receivers[0].receive(timeout=2) + assert len(partition_0) == 0 + partition_1 = receivers[1].receive(timeout=2) + assert len(partition_1) == 1 + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_send_non_ascii_async(connstr_receivers): + connection_str, receivers = connstr_receivers + client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + sender = client.add_async_sender(partition="0") + try: + await client.run_async() + await sender.send(EventData("é,è,à,ù,â,ê,î,ô,û")) + await 
sender.send(EventData(json.dumps({"foo": "漢字"}))) + except: + raise + finally: + await client.stop_async() + + partition_0 = receivers[0].receive(timeout=2) + assert len(partition_0) == 2 + assert partition_0[0].body_as_str() == "é,è,à,ù,â,ê,î,ô,û" + assert partition_0[1].body_as_json() == {"foo": "漢字"} + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_send_partition_batch_async(connstr_receivers): + connection_str, receivers = connstr_receivers + def batched(): + for i in range(10): + yield "Event number {}".format(i) + + client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + sender = client.add_async_sender(partition="1") + try: + await client.run_async() + await sender.send(EventData(batch=batched())) + except: + raise + finally: + await client.stop_async() + + partition_0 = receivers[0].receive(timeout=2) + assert len(partition_0) == 0 + partition_1 = receivers[1].receive(timeout=2) + assert len(partition_1) == 10 + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_send_array_async(connstr_receivers): + connection_str, receivers = connstr_receivers + client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + sender = client.add_async_sender() + try: + await client.run_async() + await sender.send(EventData([b"A", b"B", b"C"])) + except: + raise + finally: + await client.stop_async() + + received = [] + for r in receivers: + received.extend(r.receive(timeout=1)) + + assert len(received) == 1 + assert list(received[0].body) == [b"A", b"B", b"C"] + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_send_multiple_clients_async(connstr_receivers): + connection_str, receivers = connstr_receivers + client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + sender_0 = client.add_async_sender(partition="0") + sender_1 = client.add_async_sender(partition="1") + try: + await client.run_async() + await sender_0.send(EventData(b"Message 0")) + await 
sender_1.send(EventData(b"Message 1")) + except: + raise + finally: + await client.stop_async() + + partition_0 = receivers[0].receive(timeout=2) + assert len(partition_0) == 1 + partition_1 = receivers[1].receive(timeout=2) + assert len(partition_1) == 1 + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_send_batch_with_app_prop_async(connstr_receivers): + pytest.skip("Waiting on uAMQP release") + connection_str, receivers = connstr_receivers + + def batched(): + for i in range(10): + yield "Event number {}".format(i) + for i in range(10, 20): + yield EventData("Event number {}".format(i)) + + client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + sender = client.add_async_sender() + try: + await client.run_async() + + app_prop_key = "raw_prop" + app_prop_value = "raw_value" + batch_app_prop = {app_prop_key:app_prop_value} + batch_event = EventData(batch=batched()) + batch_event.application_properties = batch_app_prop + + await sender.send(batch_event) + except: + raise + finally: + await client.stop_async() + + time.sleep(1) + + received = [] + for r in receivers: + received.extend(r.receive(timeout=3)) + + assert len(received) == 20 + for index, message in enumerate(received): + assert list(message.body)[0] == "Event number {}".format(index).encode('utf-8') + assert (app_prop_key.encode('utf-8') in message.application_properties) \ + and (dict(message.application_properties)[app_prop_key.encode('utf-8')] == app_prop_value.encode('utf-8')) diff --git a/azure-eventhubs/tests/test_iothub_receive.py b/azure-eventhubs/tests/test_iothub_receive.py new file mode 100644 index 000000000000..ce3db34940e8 --- /dev/null +++ b/azure-eventhubs/tests/test_iothub_receive.py @@ -0,0 +1,26 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+#-------------------------------------------------------------------------- + +import os +import pytest +import time + +from azure import eventhub +from azure.eventhub import EventData, EventHubClient, Offset + + +@pytest.mark.liveTest +def test_iothub_receive_sync(iot_connection_str, device_id): + client = EventHubClient.from_iothub_connection_string(iot_connection_str, debug=True) + receiver = client.add_receiver("$default", "0", operation='/messages/events') + try: + client.run() + partitions = client.get_eventhub_info() + assert partitions["partition_ids"] == ["0", "1", "2", "3"] + received = receiver.receive(timeout=5) + assert len(received) == 0 + finally: + client.stop() \ No newline at end of file diff --git a/azure-eventhubs/tests/test_iothub_send.py b/azure-eventhubs/tests/test_iothub_send.py new file mode 100644 index 000000000000..96d4adaa4cf1 --- /dev/null +++ b/azure-eventhubs/tests/test_iothub_send.py @@ -0,0 +1,29 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+#-------------------------------------------------------------------------- + +import os +import pytest +import time +import uuid + +from uamqp.message import MessageProperties + +from azure import eventhub +from azure.eventhub import EventData, EventHubClient + + +@pytest.mark.liveTest +def test_iothub_send_single_event(iot_connection_str, device_id): + client = EventHubClient.from_iothub_connection_string(iot_connection_str, debug=True) + sender = client.add_sender(operation='/messages/devicebound') + try: + client.run() + outcome = sender.send(EventData(b"A single event", to_device=device_id)) + assert outcome.value == 0 + except: + raise + finally: + client.stop() diff --git a/azure-eventhubs/tests/test_longrunning_receive.py b/azure-eventhubs/tests/test_longrunning_receive.py new file mode 100644 index 000000000000..1afbd9c05103 --- /dev/null +++ b/azure-eventhubs/tests/test_longrunning_receive.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python + +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +""" +receive test. 
+""" + +import logging +import argparse +import time +import os +import sys +import pytest + +from logging.handlers import RotatingFileHandler + +from azure.eventhub import Offset +from azure.eventhub import EventHubClient + +def get_logger(filename, level=logging.INFO): + azure_logger = logging.getLogger("azure.eventhub") + azure_logger.setLevel(level) + uamqp_logger = logging.getLogger("uamqp") + uamqp_logger.setLevel(logging.INFO) + + formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s') + console_handler = logging.StreamHandler(stream=sys.stdout) + console_handler.setFormatter(formatter) + if not azure_logger.handlers: + azure_logger.addHandler(console_handler) + if not uamqp_logger.handlers: + uamqp_logger.addHandler(console_handler) + + if filename: + file_handler = RotatingFileHandler(filename, maxBytes=20*1024*1024, backupCount=3) + file_handler.setFormatter(formatter) + azure_logger.addHandler(file_handler) + uamqp_logger.addHandler(file_handler) + + return azure_logger + +logger = get_logger("recv_test.log", logging.INFO) + + +def get_partitions(args): + eh_data = args.get_eventhub_info() + return eh_data["partition_ids"] + + +def pump(receivers, duration): + total = 0 + iteration = 0 + deadline = time.time() + duration + try: + while time.time() < deadline: + for pid, receiver in receivers.items(): + batch = receiver.receive(timeout=5) + size = len(batch) + total += size + iteration += 1 + if size == 0: + print("{}: No events received, queue size {}, delivered {}".format( + pid, + receiver.queue_size, + total)) + elif iteration >= 50: + iteration = 0 + print("{}: total received {}, last sn={}, last offset={}".format( + pid, + total, + batch[-1].sequence_number, + batch[-1].offset.value)) + print("Total received {}".format(total)) + except Exception as e: + print("Receiver failed: {}".format(e)) + raise + + +@pytest.mark.liveTest +def test_long_running_receive(connection_str): + parser = argparse.ArgumentParser() + 
parser.add_argument("--duration", help="Duration in seconds of the test", type=int, default=30)
+    parser.add_argument("--consumer", help="Consumer group name", default="$default")
+    parser.add_argument("--partitions", help="Comma separated partition IDs")
+    parser.add_argument("--offset", help="Starting offset", default="-1")
+    parser.add_argument("--conn-str", help="EventHub connection string", default=connection_str)
+    parser.add_argument("--eventhub", help="Name of EventHub")
+    parser.add_argument("--address", help="Address URI to the EventHub entity")
+    parser.add_argument("--sas-policy", help="Name of the shared access policy to authenticate with")
+    parser.add_argument("--sas-key", help="Shared access key")
+
+    args, _ = parser.parse_known_args()
+    if args.conn_str:
+        client = EventHubClient.from_connection_string(
+            args.conn_str,
+            eventhub=args.eventhub, debug=False)
+    elif args.address:
+        client = EventHubClient(
+            args.address,
+            username=args.sas_policy,
+            password=args.sas_key)
+    else:
+        try:
+            import pytest
+            pytest.skip("Must specify either '--conn-str' or '--address'")
+        except ImportError:
+            raise ValueError("Must specify either '--conn-str' or '--address'")
+
+    try:
+        if not args.partitions:
+            partitions = get_partitions(client)
+        else:
+            partitions = args.partitions.split(",")
+        pumps = {}
+        for pid in partitions:
+            pumps[pid] = client.add_receiver(
+                consumer_group=args.consumer,
+                partition=pid,
+                offset=Offset(args.offset),
+                prefetch=50)
+        client.run()
+        pump(pumps, args.duration)
+    finally:
+        client.stop()
+
+
+if __name__ == '__main__':
+    test_long_running_receive(os.environ.get('EVENT_HUB_CONNECTION_STR'))
diff --git a/azure-eventhubs/tests/test_longrunning_send.py b/azure-eventhubs/tests/test_longrunning_send.py
new file mode 100644
index 000000000000..31744d8550dd
--- /dev/null
+++ b/azure-eventhubs/tests/test_longrunning_send.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python
+
+# 
-------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +""" +send test +""" + +import argparse +import time +import os +import sys +import logging +import pytest +from logging.handlers import RotatingFileHandler + +from azure.eventhub import EventHubClient, Sender, EventData + + +def get_logger(filename, level=logging.INFO): + azure_logger = logging.getLogger("azure.eventhub") + azure_logger.setLevel(level) + uamqp_logger = logging.getLogger("uamqp") + uamqp_logger.setLevel(logging.INFO) + + formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s') + console_handler = logging.StreamHandler(stream=sys.stdout) + console_handler.setFormatter(formatter) + if not azure_logger.handlers: + azure_logger.addHandler(console_handler) + if not uamqp_logger.handlers: + uamqp_logger.addHandler(console_handler) + + if filename: + file_handler = RotatingFileHandler(filename, maxBytes=20*1024*1024, backupCount=3) + file_handler.setFormatter(formatter) + azure_logger.addHandler(file_handler) + uamqp_logger.addHandler(file_handler) + + return azure_logger + +logger = get_logger("send_test.log", logging.INFO) + + +def check_send_successful(outcome, condition): + if outcome.value != 0: + print("Send failed {}".format(condition)) + + +def main(client, args): + sender = client.add_sender() + client.run() + deadline = time.time() + args.duration + total = 0 + + def data_generator(): + for i in range(args.batch): + yield b"D" * args.payload + + if args.batch > 1: + print("Sending batched messages") + else: + print("Sending single messages") + + try: + while time.time() < deadline: + if args.batch > 1: + data = EventData(batch=data_generator()) + else: + data = 
EventData(body=b"D" * args.payload) + sender.transfer(data, callback=check_send_successful) + total += args.batch + if total % 10000 == 0: + sender.wait() + print("Send total {}".format(total)) + except Exception as err: + print("Send failed {}".format(err)) + finally: + client.stop() + print("Sent total {}".format(total)) + + +@pytest.mark.liveTest +def test_long_running_send(connection_str): + if sys.platform.startswith('darwin'): + import pytest + pytest.skip("Skipping on OSX") + parser = argparse.ArgumentParser() + parser.add_argument("--duration", help="Duration in seconds of the test", type=int, default=30) + parser.add_argument("--payload", help="payload size", type=int, default=512) + parser.add_argument("--batch", help="Number of events to send and wait", type=int, default=1) + parser.add_argument("--conn-str", help="EventHub connection string", default=connection_str) + parser.add_argument("--eventhub", help="Name of EventHub") + parser.add_argument("--address", help="Address URI to the EventHub entity") + parser.add_argument("--sas-policy", help="Name of the shared access policy to authenticate with") + parser.add_argument("--sas-key", help="Shared access key") + + args, _ = parser.parse_known_args() + if args.conn_str: + client = EventHubClient.from_connection_string( + args.conn_str, + eventhub=args.eventhub) + elif args.address: + client = EventHubClient( + args.address, + username=args.sas_policy, + password=args.sas_key) + else: + try: + import pytest + pytest.skip("Must specify either '--conn-str' or '--address'") + except ImportError: + raise ValueError("Must specify either '--conn-str' or '--address'") + + try: + main(client, args) + except KeyboardInterrupt: + pass + +if __name__ == '__main__': + test_long_running_send(os.environ.get('EVENT_HUB_CONNECTION_STR')) diff --git a/azure-eventhubs/tests/test_negative.py b/azure-eventhubs/tests/test_negative.py new file mode 100644 index 000000000000..28fd7493ef13 --- /dev/null +++ 
b/azure-eventhubs/tests/test_negative.py @@ -0,0 +1,210 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +#-------------------------------------------------------------------------- + +import os +import pytest +import time +import sys + +from azure import eventhub +from azure.eventhub import ( + EventData, + Offset, + EventHubError, + EventHubClient) + + +@pytest.mark.liveTest +def test_send_with_invalid_hostname(invalid_hostname, connstr_receivers): + _, receivers = connstr_receivers + client = EventHubClient.from_connection_string(invalid_hostname, debug=False) + sender = client.add_sender() + with pytest.raises(EventHubError): + client.run() + + +@pytest.mark.liveTest +def test_receive_with_invalid_hostname_sync(invalid_hostname): + client = EventHubClient.from_connection_string(invalid_hostname, debug=True) + receiver = client.add_receiver("$default", "0") + with pytest.raises(EventHubError): + client.run() + + +@pytest.mark.liveTest +def test_send_with_invalid_key(invalid_key, connstr_receivers): + _, receivers = connstr_receivers + client = EventHubClient.from_connection_string(invalid_key, debug=False) + sender = client.add_sender() + with pytest.raises(EventHubError): + client.run() + + +@pytest.mark.liveTest +def test_receive_with_invalid_key_sync(invalid_key): + client = EventHubClient.from_connection_string(invalid_key, debug=True) + receiver = client.add_receiver("$default", "0") + with pytest.raises(EventHubError): + client.run() + + +@pytest.mark.liveTest +def test_send_with_invalid_policy(invalid_policy, connstr_receivers): + _, receivers = connstr_receivers + client = EventHubClient.from_connection_string(invalid_policy, debug=False) + sender = client.add_sender() + with pytest.raises(EventHubError): + client.run() + + +@pytest.mark.liveTest +def 
test_receive_with_invalid_policy_sync(invalid_policy): + client = EventHubClient.from_connection_string(invalid_policy, debug=True) + receiver = client.add_receiver("$default", "0") + with pytest.raises(EventHubError): + client.run() + + +@pytest.mark.liveTest +def test_send_partition_key_with_partition_sync(connection_str): + client = EventHubClient.from_connection_string(connection_str, debug=True) + sender = client.add_sender(partition="1") + try: + client.run() + data = EventData(b"Data") + data.partition_key = b"PKey" + with pytest.raises(ValueError): + sender.send(data) + finally: + client.stop() + + +@pytest.mark.liveTest +def test_non_existing_entity_sender(connection_str): + client = EventHubClient.from_connection_string(connection_str, eventhub="nemo", debug=False) + sender = client.add_sender(partition="1") + with pytest.raises(EventHubError): + client.run() + + +@pytest.mark.liveTest +def test_non_existing_entity_receiver(connection_str): + client = EventHubClient.from_connection_string(connection_str, eventhub="nemo", debug=False) + receiver = client.add_receiver("$default", "0") + with pytest.raises(EventHubError): + client.run() + + +@pytest.mark.liveTest +def test_receive_from_invalid_partitions_sync(connection_str): + partitions = ["XYZ", "-1", "1000", "-" ] + for p in partitions: + client = EventHubClient.from_connection_string(connection_str, debug=True) + receiver = client.add_receiver("$default", p) + try: + with pytest.raises(EventHubError): + client.run() + receiver.receive(timeout=10) + finally: + client.stop() + + +@pytest.mark.liveTest +def test_send_to_invalid_partitions(connection_str): + partitions = ["XYZ", "-1", "1000", "-" ] + for p in partitions: + client = EventHubClient.from_connection_string(connection_str, debug=False) + sender = client.add_sender(partition=p) + try: + with pytest.raises(EventHubError): + client.run() + finally: + client.stop() + + +@pytest.mark.liveTest +def test_send_too_large_message(connection_str): + if 
sys.platform.startswith('darwin'): + pytest.skip("Skipping on OSX - open issue regarding message size") + client = EventHubClient.from_connection_string(connection_str, debug=True) + sender = client.add_sender() + try: + client.run() + data = EventData(b"A" * 300000) + with pytest.raises(EventHubError): + sender.send(data) + finally: + client.stop() + + +@pytest.mark.liveTest +def test_send_null_body(connection_str): + partitions = ["XYZ", "-1", "1000", "-" ] + client = EventHubClient.from_connection_string(connection_str, debug=False) + sender = client.add_sender() + try: + client.run() + with pytest.raises(ValueError): + data = EventData(None) + sender.send(data) + finally: + client.stop() + + +@pytest.mark.liveTest +def test_message_body_types(connstr_senders): + connection_str, senders = connstr_senders + client = EventHubClient.from_connection_string(connection_str, debug=False) + receiver = client.add_receiver("$default", "0", offset=Offset('@latest')) + try: + client.run() + + received = receiver.receive(timeout=5) + assert len(received) == 0 + senders[0].send(EventData(b"Bytes Data")) + time.sleep(1) + received = receiver.receive(timeout=5) + assert len(received) == 1 + assert list(received[0].body) == [b'Bytes Data'] + assert received[0].body_as_str() == "Bytes Data" + with pytest.raises(TypeError): + received[0].body_as_json() + + senders[0].send(EventData("Str Data")) + time.sleep(1) + received = receiver.receive(timeout=5) + assert len(received) == 1 + assert list(received[0].body) == [b'Str Data'] + assert received[0].body_as_str() == "Str Data" + with pytest.raises(TypeError): + received[0].body_as_json() + + senders[0].send(EventData(b'{"test_value": "JSON bytes data", "key1": true, "key2": 42}')) + time.sleep(1) + received = receiver.receive(timeout=5) + assert len(received) == 1 + assert list(received[0].body) == [b'{"test_value": "JSON bytes data", "key1": true, "key2": 42}'] + assert received[0].body_as_str() == '{"test_value": "JSON bytes data", 
"key1": true, "key2": 42}' + assert received[0].body_as_json() == {"test_value": "JSON bytes data", "key1": True, "key2": 42} + + senders[0].send(EventData('{"test_value": "JSON str data", "key1": true, "key2": 42}')) + time.sleep(1) + received = receiver.receive(timeout=5) + assert len(received) == 1 + assert list(received[0].body) == [b'{"test_value": "JSON str data", "key1": true, "key2": 42}'] + assert received[0].body_as_str() == '{"test_value": "JSON str data", "key1": true, "key2": 42}' + assert received[0].body_as_json() == {"test_value": "JSON str data", "key1": True, "key2": 42} + + senders[0].send(EventData(42)) + time.sleep(1) + received = receiver.receive(timeout=5) + assert len(received) == 1 + assert received[0].body_as_str() == "42" + assert received[0].body == 42 + except: + raise + finally: + client.stop() \ No newline at end of file diff --git a/azure-eventhubs/tests/test_receive.py b/azure-eventhubs/tests/test_receive.py new file mode 100644 index 000000000000..0b05bf78c842 --- /dev/null +++ b/azure-eventhubs/tests/test_receive.py @@ -0,0 +1,303 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
#--------------------------------------------------------------------------
"""Live (service-backed) tests for synchronous Event Hubs receive paths.

Every test uses the ``connstr_senders`` fixture (defined in conftest), which
yields an Event Hubs connection string plus one pre-built sender per
partition.  Tests are gated behind the ``liveTest`` marker so they only run
against a real namespace.

Fix applied in review: the original ``except: raise`` clauses before each
``finally`` were redundant (a bare re-raise changes nothing) and have been
removed; ``try/finally`` alone guarantees ``client.stop()`` runs.
"""

import os
import pytest
import time
import datetime

from azure import eventhub
from azure.eventhub import EventData, EventHubClient, Offset


# def test_receive_without_events(connstr_senders):
#     connection_str, senders = connstr_senders
#     client = EventHubClient.from_connection_string(connection_str, debug=True)
#     receiver = client.add_receiver("$default", "0", offset=Offset('@latest'))
#     finish = datetime.datetime.now() + datetime.timedelta(seconds=240)
#     count = 0
#     try:
#         client.run()
#         while True:  # datetime.datetime.now() < finish:
#             senders[0].send(EventData("Receiving an event {}".format(count)))
#             received = receiver.receive(timeout=1)
#             if received:
#                 print(received[0].body_as_str())
#             count += 1
#             time.sleep(1)
#     except:
#         raise
#     finally:
#         client.stop()


@pytest.mark.liveTest
def test_receive_end_of_stream(connstr_senders):
    """A receiver started at '@latest' sees nothing until a new event arrives."""
    connection_str, senders = connstr_senders
    client = EventHubClient.from_connection_string(connection_str, debug=False)
    receiver = client.add_receiver("$default", "0", offset=Offset('@latest'))
    try:
        client.run()

        received = receiver.receive(timeout=5)
        assert len(received) == 0
        senders[0].send(EventData(b"Receiving only a single event"))
        received = receiver.receive(timeout=5)
        assert len(received) == 1

        assert received[0].body_as_str() == "Receiving only a single event"
        assert list(received[-1].body)[0] == b"Receiving only a single event"
    finally:
        client.stop()


@pytest.mark.liveTest
def test_receive_with_offset_sync(connstr_senders):
    """A receiver created from a message's offset only sees later messages."""
    connection_str, senders = connstr_senders
    client = EventHubClient.from_connection_string(connection_str, debug=False)
    partitions = client.get_eventhub_info()
    assert partitions["partition_ids"] == ["0", "1"]
    receiver = client.add_receiver("$default", "0", offset=Offset('@latest'))
    try:
        client.run()
        more_partitions = client.get_eventhub_info()
        assert more_partitions["partition_ids"] == ["0", "1"]

        received = receiver.receive(timeout=5)
        assert len(received) == 0
        senders[0].send(EventData(b"Data"))
        received = receiver.receive(timeout=5)
        assert len(received) == 1
        offset = received[0].offset

        assert list(received[0].body) == [b'Data']
        assert received[0].body_as_str() == "Data"

        # Default Offset semantics are exclusive: the event at `offset`
        # itself must NOT be redelivered.
        offset_receiver = client.add_receiver("$default", "0", offset=offset)
        client.run()
        received = offset_receiver.receive(timeout=5)
        assert len(received) == 0
        senders[0].send(EventData(b"Message after offset"))
        received = offset_receiver.receive(timeout=5)
        assert len(received) == 1
    finally:
        client.stop()


@pytest.mark.liveTest
def test_receive_with_inclusive_offset(connstr_senders):
    """Offset(..., inclusive=True) redelivers the event at that offset."""
    connection_str, senders = connstr_senders
    client = EventHubClient.from_connection_string(connection_str, debug=False)
    receiver = client.add_receiver("$default", "0", offset=Offset('@latest'))
    try:
        client.run()

        received = receiver.receive(timeout=5)
        assert len(received) == 0
        senders[0].send(EventData(b"Data"))
        time.sleep(1)
        received = receiver.receive(timeout=5)
        assert len(received) == 1
        offset = received[0].offset

        assert list(received[0].body) == [b'Data']
        assert received[0].body_as_str() == "Data"

        offset_receiver = client.add_receiver(
            "$default", "0", offset=Offset(offset.value, inclusive=True))
        client.run()
        received = offset_receiver.receive(timeout=5)
        assert len(received) == 1
    finally:
        client.stop()


@pytest.mark.liveTest
def test_receive_with_datetime_sync(connstr_senders):
    """An Offset built from an enqueued_time filter excludes that message."""
    connection_str, senders = connstr_senders
    client = EventHubClient.from_connection_string(connection_str, debug=False)
    partitions = client.get_eventhub_info()
    assert partitions["partition_ids"] == ["0", "1"]
    receiver = client.add_receiver("$default", "0", offset=Offset('@latest'))
    try:
        client.run()
        more_partitions = client.get_eventhub_info()
        assert more_partitions["partition_ids"] == ["0", "1"]
        received = receiver.receive(timeout=5)
        assert len(received) == 0
        senders[0].send(EventData(b"Data"))
        received = receiver.receive(timeout=5)
        assert len(received) == 1
        offset = received[0].enqueued_time

        assert list(received[0].body) == [b'Data']
        assert received[0].body_as_str() == "Data"

        offset_receiver = client.add_receiver("$default", "0", offset=Offset(offset))
        client.run()
        received = offset_receiver.receive(timeout=5)
        assert len(received) == 0
        senders[0].send(EventData(b"Message after timestamp"))
        received = offset_receiver.receive(timeout=5)
        assert len(received) == 1
    finally:
        client.stop()


@pytest.mark.liveTest
def test_receive_with_custom_datetime_sync(connstr_senders):
    """Only events enqueued after a caller-chosen UTC timestamp are received."""
    connection_str, senders = connstr_senders
    client = EventHubClient.from_connection_string(connection_str, debug=False)
    for i in range(5):
        senders[0].send(EventData(b"Message before timestamp"))
    # Wait a full minute so the "before" events are strictly older than the
    # minute-truncated cut-off computed below.
    time.sleep(60)

    now = datetime.datetime.utcnow()
    offset = datetime.datetime(now.year, now.month, now.day, now.hour, now.minute)
    for i in range(5):
        senders[0].send(EventData(b"Message after timestamp"))

    receiver = client.add_receiver("$default", "0", offset=Offset(offset))
    try:
        client.run()
        all_received = []
        received = receiver.receive(timeout=1)
        while received:
            all_received.extend(received)
            received = receiver.receive(timeout=1)

        assert len(all_received) == 5
        for received_event in all_received:
            assert received_event.body_as_str() == "Message after timestamp"
            assert received_event.enqueued_time > offset
    finally:
        client.stop()


@pytest.mark.liveTest
def test_receive_with_sequence_no(connstr_senders):
    """An Offset from a sequence number excludes that event by default."""
    connection_str, senders = connstr_senders
    client = EventHubClient.from_connection_string(connection_str, debug=False)
    receiver = client.add_receiver("$default", "0", offset=Offset('@latest'))
    try:
        client.run()

        received = receiver.receive(timeout=5)
        assert len(received) == 0
        senders[0].send(EventData(b"Data"))
        time.sleep(1)
        received = receiver.receive(timeout=5)
        assert len(received) == 1
        offset = received[0].sequence_number

        offset_receiver = client.add_receiver("$default", "0", offset=Offset(offset))
        client.run()
        received = offset_receiver.receive(timeout=5)
        assert len(received) == 0
        senders[0].send(EventData(b"Message next in sequence"))
        time.sleep(1)
        received = offset_receiver.receive(timeout=5)
        assert len(received) == 1
    finally:
        client.stop()


@pytest.mark.liveTest
def test_receive_with_inclusive_sequence_no(connstr_senders):
    """Offset(seq_no, inclusive=True) redelivers the event with that number."""
    connection_str, senders = connstr_senders
    client = EventHubClient.from_connection_string(connection_str, debug=False)
    receiver = client.add_receiver("$default", "0", offset=Offset('@latest'))
    try:
        client.run()

        received = receiver.receive(timeout=5)
        assert len(received) == 0
        senders[0].send(EventData(b"Data"))
        received = receiver.receive(timeout=5)
        assert len(received) == 1
        offset = received[0].sequence_number

        offset_receiver = client.add_receiver(
            "$default", "0", offset=Offset(offset, inclusive=True))
        client.run()
        received = offset_receiver.receive(timeout=5)
        assert len(received) == 1
    finally:
        client.stop()


@pytest.mark.liveTest
def test_receive_batch(connstr_senders):
    """max_batch_size caps how many prefetched events one receive() returns."""
    connection_str, senders = connstr_senders
    client = EventHubClient.from_connection_string(
        connection_str, debug=False)
    receiver = client.add_receiver(
        "$default", "0", prefetch=500, offset=Offset('@latest'))
    try:
        client.run()

        received = receiver.receive(timeout=5)
        assert len(received) == 0
        for i in range(10):
            senders[0].send(EventData(b"Data"))
        received = receiver.receive(max_batch_size=5, timeout=5)
        assert len(received) == 5
    finally:
        client.stop()


@pytest.mark.liveTest
def test_receive_batch_with_app_prop_sync(connstr_senders):
    """Application properties set on a batch apply to every received event."""
    pytest.skip("Waiting on uAMQP release")
    connection_str, senders = connstr_senders

    def batched():
        for i in range(10):
            yield "Event Data {}".format(i)
        for i in range(10, 20):
            yield EventData("Event Data {}".format(i))

    client = EventHubClient.from_connection_string(connection_str, debug=False)
    receiver = client.add_receiver(
        "$default", "0", prefetch=500, offset=Offset('@latest'))
    try:
        client.run()

        received = receiver.receive(timeout=5)
        assert len(received) == 0

        app_prop_key = "raw_prop"
        app_prop_value = "raw_value"
        batch_app_prop = {app_prop_key: app_prop_value}
        batch_event = EventData(batch=batched())
        batch_event.application_properties = batch_app_prop

        senders[0].send(batch_event)

        time.sleep(1)

        received = receiver.receive(max_batch_size=15, timeout=5)
        assert len(received) == 15

        for index, message in enumerate(received):
            assert list(message.body)[0] == "Event Data {}".format(index).encode('utf-8')
            # uAMQP surfaces application property keys/values as bytes.
            assert (app_prop_key.encode('utf-8') in message.application_properties) \
                and (dict(message.application_properties)[app_prop_key.encode('utf-8')] == app_prop_value.encode('utf-8'))
    finally:
        client.stop()

# ---- patch continues: new file azure-eventhubs/tests/test_reconnect.py (@@ -0,0 +1,82 @@) ----
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
"""Live tests for sender reconnect behaviour.

Covers two failure modes: the service dropping an idle connection after a
long gap, and the underlying uAMQP message sender being destroyed outright.
In both cases a subsequent ``send`` must succeed transparently.  Uses the
``connstr_receivers`` fixture (connection string + one receiver per
partition) and the ``liveTest`` marker.
"""

import os
import time
import pytest

from azure import eventhub
from azure.eventhub import (
    EventData,
    Offset,
    EventHubError,
    EventHubClient)


@pytest.mark.liveTest
def test_send_with_long_interval_sync(connstr_receivers):
    """Sends survive two five-minute idle gaps (forces service-side drop)."""
    connection_str, receivers = connstr_receivers
    client = EventHubClient.from_connection_string(connection_str, debug=True)
    sender = client.add_sender()
    try:
        client.run()
        sender.send(EventData(b"A single event"))
        for _ in range(2):
            # Idle long enough that the service closes the connection;
            # the next send must trigger a transparent reconnect.
            time.sleep(300)
            sender.send(EventData(b"A single event"))
    finally:
        client.stop()

    received = []
    for partition_receiver in receivers:
        received.extend(partition_receiver.receive(timeout=1))

    assert len(received) == 3
    assert list(received[0].body)[0] == b"A single event"


@pytest.mark.liveTest
def test_send_with_forced_conn_close_sync(connstr_receivers):
    """Sends succeed after the uAMQP sender link is destroyed twice."""
    connection_str, receivers = connstr_receivers
    client = EventHubClient.from_connection_string(connection_str, debug=True)
    sender = client.add_sender()
    try:
        client.run()
        sender.send(EventData(b"A single event"))
        # Reach into the private uAMQP handler to simulate a hard failure.
        sender._handler._message_sender.destroy()
        time.sleep(300)
        sender.send(EventData(b"A single event"))
        sender.send(EventData(b"A single event"))
        sender._handler._message_sender.destroy()
        time.sleep(300)
        sender.send(EventData(b"A single event"))
        sender.send(EventData(b"A single event"))
    finally:
        client.stop()

    received = []
    for partition_receiver in receivers:
        received.extend(partition_receiver.receive(timeout=1))
    assert len(received) == 5
    assert list(received[0].body)[0] == b"A single event"


# def test_send_with_forced_link_detach(connstr_receivers):
#     connection_str, receivers = connstr_receivers
#     client = EventHubClient.from_connection_string(connection_str, debug=True)
#     sender = client.add_sender()
#     size = 20 * 1024
#     try:
#         client.run()
#         for i in range(1000):
#             sender.transfer(EventData([b"A"*size, b"B"*size, b"C"*size, b"D"*size,
#                                        b"A"*size, b"B"*size, b"C"*size, b"D"*size, b"A"*size, b"B"*size, b"C"*size, b"D"*size]))
#         sender.wait()
#     finally:
#         client.stop()
#
#     received = []
#     for r in receivers:
#         received.extend(r.receive(timeout=10))

# ---- patch continues: new file azure-eventhubs/tests/test_send.py (@@ -0,0 +1,285 @@) ----
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
"""Live tests for synchronous Event Hubs send paths.

This file contains non-ASCII string literals, so the PEP 263 encoding
cookie above is required for Python 2; the original ``# -- coding --``
marker has been restored to the conventional ``# -*- ... -*-`` form.
Fix applied in review: redundant ``except: raise`` clauses removed —
``try/finally`` alone guarantees ``client.stop()`` runs.
"""

import os
import pytest
import time
import json
import sys

from azure import eventhub
from azure.eventhub import EventData, EventHubClient


@pytest.mark.liveTest
def test_send_with_partition_key(connstr_receivers):
    """All events sharing a partition key land on the same partition."""
    connection_str, receivers = connstr_receivers
    client = EventHubClient.from_connection_string(connection_str, debug=False)
    sender = client.add_sender()
    try:
        client.run()

        data_val = 0
        for partition in [b"a", b"b", b"c", b"d", b"e", b"f"]:
            partition_key = b"test_partition_" + partition
            for i in range(50):
                data = EventData(str(data_val))
                data.partition_key = partition_key
                data_val += 1
                sender.send(data)
    finally:
        client.stop()

    # Map each observed partition key to the partition index it arrived on;
    # a key seen on two different partitions is a routing failure.
    found_partition_keys = {}
    for index, partition in enumerate(receivers):
        received = partition.receive(timeout=5)
        for message in received:
            try:
                existing = found_partition_keys[message.partition_key]
                assert existing == index
            except KeyError:
                found_partition_keys[message.partition_key] = index


@pytest.mark.liveTest
def test_send_and_receive_large_body_size(connstr_receivers):
    """A 250 KB body round-trips intact."""
    if sys.platform.startswith('darwin'):
        pytest.skip("Skipping on OSX - open issue regarding message size")
    connection_str, receivers = connstr_receivers
    client = EventHubClient.from_connection_string(connection_str, debug=False)
    sender = client.add_sender()
    try:
        client.run()
        payload = 250 * 1024
        sender.send(EventData("A" * payload))
    finally:
        client.stop()

    received = []
    for r in receivers:
        received.extend(r.receive(timeout=4))

    assert len(received) == 1
    assert len(list(received[0].body)[0]) == payload


@pytest.mark.liveTest
def test_send_and_receive_zero_length_body(connstr_receivers):
    """An empty-string body round-trips as empty bytes."""
    connection_str, receivers = connstr_receivers
    client = EventHubClient.from_connection_string(connection_str, debug=False)
    sender = client.add_sender()
    try:
        client.run()
        sender.send(EventData(""))
    finally:
        client.stop()

    received = []
    for r in receivers:
        received.extend(r.receive(timeout=1))

    assert len(received) == 1
    assert list(received[0].body)[0] == b""


@pytest.mark.liveTest
def test_send_single_event(connstr_receivers):
    """A single bytes event round-trips intact."""
    connection_str, receivers = connstr_receivers
    client = EventHubClient.from_connection_string(connection_str, debug=False)
    sender = client.add_sender()
    try:
        client.run()
        sender.send(EventData(b"A single event"))
    finally:
        client.stop()

    received = []
    for r in receivers:
        received.extend(r.receive(timeout=1))

    assert len(received) == 1
    assert list(received[0].body)[0] == b"A single event"


@pytest.mark.liveTest
def test_send_batch_sync(connstr_receivers):
    """A batch built from a generator delivers every item in order."""
    connection_str, receivers = connstr_receivers

    def batched():
        for i in range(10):
            yield "Event number {}".format(i)

    client = EventHubClient.from_connection_string(connection_str, debug=False)
    sender = client.add_sender()
    try:
        client.run()
        sender.send(EventData(batch=batched()))
    finally:
        client.stop()

    time.sleep(1)
    received = []
    for r in receivers:
        received.extend(r.receive(timeout=3))

    assert len(received) == 10
    for index, message in enumerate(received):
        assert list(message.body)[0] == "Event number {}".format(index).encode('utf-8')


@pytest.mark.liveTest
def test_send_partition(connstr_receivers):
    """A sender pinned to partition "1" never delivers to partition "0"."""
    connection_str, receivers = connstr_receivers
    client = EventHubClient.from_connection_string(connection_str, debug=False)
    sender = client.add_sender(partition="1")
    try:
        client.run()
        sender.send(EventData(b"Data"))
    finally:
        client.stop()

    partition_0 = receivers[0].receive(timeout=2)
    assert len(partition_0) == 0
    partition_1 = receivers[1].receive(timeout=2)
    assert len(partition_1) == 1


@pytest.mark.liveTest
def test_send_non_ascii(connstr_receivers):
    """Unicode text and JSON survive the round trip unmangled."""
    connection_str, receivers = connstr_receivers
    client = EventHubClient.from_connection_string(connection_str, debug=False)
    sender = client.add_sender(partition="0")
    try:
        client.run()
        sender.send(EventData(u"é,è,à,ù,â,ê,î,ô,û"))
        sender.send(EventData(json.dumps({"foo": u"漢字"})))
    finally:
        client.stop()

    partition_0 = receivers[0].receive(timeout=2)
    assert len(partition_0) == 2
    assert partition_0[0].body_as_str() == u"é,è,à,ù,â,ê,î,ô,û"
    assert partition_0[1].body_as_json() == {"foo": u"漢字"}


@pytest.mark.liveTest
def test_send_partition_batch(connstr_receivers):
    """A generator batch from a pinned sender lands only on its partition."""
    connection_str, receivers = connstr_receivers

    def batched():
        for i in range(10):
            yield "Event number {}".format(i)

    client = EventHubClient.from_connection_string(connection_str, debug=False)
    sender = client.add_sender(partition="1")
    try:
        client.run()
        sender.send(EventData(batch=batched()))
        time.sleep(1)
    finally:
        client.stop()

    partition_0 = receivers[0].receive(timeout=2)
    assert len(partition_0) == 0
    partition_1 = receivers[1].receive(timeout=2)
    assert len(partition_1) == 10


@pytest.mark.liveTest
def test_send_array_sync(connstr_receivers):
    """A multi-part (list) body arrives as one event with all segments."""
    connection_str, receivers = connstr_receivers
    client = EventHubClient.from_connection_string(connection_str, debug=True)
    sender = client.add_sender()
    try:
        client.run()
        sender.send(EventData([b"A", b"B", b"C"]))
    finally:
        client.stop()

    received = []
    for r in receivers:
        received.extend(r.receive(timeout=1))

    assert len(received) == 1
    assert list(received[0].body) == [b"A", b"B", b"C"]


@pytest.mark.liveTest
def test_send_multiple_clients(connstr_receivers):
    """Two pinned senders on one client each deliver to their own partition."""
    connection_str, receivers = connstr_receivers
    client = EventHubClient.from_connection_string(connection_str, debug=False)
    sender_0 = client.add_sender(partition="0")
    sender_1 = client.add_sender(partition="1")
    try:
        client.run()
        sender_0.send(EventData(b"Message 0"))
        sender_1.send(EventData(b"Message 1"))
    finally:
        client.stop()

    partition_0 = receivers[0].receive(timeout=2)
    assert len(partition_0) == 1
    partition_1 = receivers[1].receive(timeout=2)
    assert len(partition_1) == 1


@pytest.mark.liveTest
def test_send_batch_with_app_prop_sync(connstr_receivers):
    """Application properties on a 20-item batch reach every received event."""
    pytest.skip("Waiting on uAMQP release")
    connection_str, receivers = connstr_receivers

    def batched():
        for i in range(10):
            yield "Event number {}".format(i)
        for i in range(10, 20):
            yield EventData("Event number {}".format(i))

    client = EventHubClient.from_connection_string(connection_str, debug=False)
    sender = client.add_sender()
    try:
        client.run()

        app_prop_key = "raw_prop"
        app_prop_value = "raw_value"
        batch_app_prop = {app_prop_key: app_prop_value}
        batch_event = EventData(batch=batched())
        batch_event.application_properties = batch_app_prop

        sender.send(batch_event)
    finally:
        client.stop()

    time.sleep(1)

    received = []
    for r in receivers:
        received.extend(r.receive(timeout=3))

    assert len(received) == 20
    for index, message in enumerate(received):
        assert list(message.body)[0] == "Event number {}".format(index).encode('utf-8')
        # uAMQP surfaces application property keys/values as bytes.
        assert (app_prop_key.encode('utf-8') in message.application_properties) \
            and (dict(message.application_properties)[app_prop_key.encode('utf-8')] == app_prop_value.encode('utf-8'))