# Copyright 2017, Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Subscriber sub-package for google-cloud-pubsub.

Re-exports :class:`SubscriberClient` as the package's public surface.
"""

# BUG FIX: ``absolute_literals`` is not a valid ``__future__`` feature and
# raises a SyntaxError at import time; ``absolute_import`` was intended.
from __future__ import absolute_import

from google.cloud.pubsub_v1.subscriber.client import SubscriberClient


__all__ = (
    'SubscriberClient',
)
# BUG FIX: the decorator line previously ended with a stray ``:``
# (``blacklist=('pull', 'streaming_pull')):``), which is a SyntaxError.
@_gapic.add_methods(subscriber_client.SubscriberClient,
                    blacklist=('pull', 'streaming_pull'))
class SubscriberClient(object):
    """A subscriber client for Google Cloud Pub/Sub.

    This creates an object that is capable of subscribing to messages.
    Generally, you can instantiate this client with no arguments, and you
    get sensible defaults.

    Args:
        flow_control (~.pubsub_v1.types.FlowControl): The flow control
            settings. Use this to prevent situations where you are
            inundated with too many messages at once.
        consumer_class (class): A class that describes how to handle
            subscriptions. You may subclass the
            :class:`.pubsub_v1.subscriber.consumer.base.BaseConsumer`
            class in order to define your own consumer. This is primarily
            provided to allow use of different concurrency models; the
            default is based on :class:`multiprocessing.Process`.
        **kwargs (dict): Any additional arguments provided are sent as
            keyword arguments to the underlying
            :class:`~.gapic.pubsub.v1.subscriber_client.SubscriberClient`.
            Generally, you should not need to set additional keyword
            arguments.
    """
    def __init__(self, flow_control=(), consumer_class=mp.Consumer,
                 **kwargs):
        # Add the metrics headers, and instantiate the underlying GAPIC
        # client.
        kwargs['lib_name'] = 'gccl'
        kwargs['lib_version'] = __VERSION__
        self.api = subscriber_client.SubscriberClient(**kwargs)

        # BUG FIX: ``flow_control`` was accepted but silently discarded.
        # NOTE(review): presumably consumers are meant to read this as the
        # default for individual subscriptions — TODO confirm downstream use.
        self._flow_control = flow_control

        # The consumer class is responsible for retrieving and dispatching
        # messages.
        self._consumer_class = consumer_class

    def subscribe(self, subscription, callback=None):
        """Return a representation of an individual subscription.

        This method creates and returns a ``Consumer`` object (that is, a
        :class:`~.pubsub_v1.subscriber.consumer.base.BaseConsumer`
        subclass) bound to the subscription. It does `not` create the
        subscription on the backend (or do any API call at all); it simply
        returns an object capable of doing these things.

        If the ``callback`` argument is provided, then the :meth:`open`
        method is automatically called on the returned object. If
        ``callback`` is not provided, the subscription is returned
        unopened.

        .. note::
            It only makes sense to provide ``callback`` here if you have
            already created the subscription manually in the API.

        Args:
            subscription (str): The name of the subscription. The
                subscription should have already been created (for example,
                by using :meth:`create_subscription`).
            callback (function): The callback function. This function
                receives the :class:`~.pubsub_v1.types.PubsubMessage` as
                its only argument.

        Returns:
            ~.pubsub_v1.subscriber.consumer.base.BaseConsumer: An instance
                of the defined ``consumer_class`` on the client.
        """
        consumer = self._consumer_class(self, subscription)
        if callable(callback):
            consumer.open(callback)
        return consumer
@six.add_metaclass(abc.ABCMeta)
class BaseConsumer(object):
    """Abstract base class for consumers.

    Although the :class:`~.pubsub_v1.subscriber.consumer.mp.Consumer`
    class, based on :class:`multiprocessing.Process`, is fine for most
    cases, advanced users may need to implement something based on a
    different concurrency model.

    This class defines the interface for the consumer implementation;
    subclasses may be passed as the ``consumer_class`` argument to
    :class:`~.pubsub_v1.client.SubscriberClient`.
    """
    def __init__(self, client, subscription, histogram_data=None):
        """Instantiate the consumer.

        Args:
            client (~.pubsub_v1.subscriber.client): The subscriber client
                used to create this instance.
            subscription (str): The name of the subscription. The canonical
                format for this is
                ``projects/{project}/subscriptions/{subscription}``.
            histogram_data (dict): Optional: A structure to store the
                histogram data for predicting appropriate ack times. If
                set, this should be a dictionary-like object.
        """
        self._client = client
        self._subscription = subscription
        # Default lease deadline, in seconds, until real ack timing data
        # makes a better prediction possible.
        self._ack_deadline = 10
        # Histogram size at the last deadline computation; used to make
        # ``ack_deadline`` sticky (see below).
        self._last_histogram_size = 0
        self.histogram = histogram.Histogram(data=histogram_data)

    @property
    def ack_deadline(self):
        """Return the appropriate ack deadline.

        This property is "sticky". It only performs the computation to
        refresh the ack deadline once the histogram has roughly doubled
        in size since the last computation.

        Returns:
            int: The correct ack deadline.
        """
        if len(self.histogram) > self._last_histogram_size * 2:
            # BUG FIX: record the histogram size at recompute time.
            # Previously this counter was never updated, so once any data
            # arrived the percentile was recomputed on every single
            # access, defeating the documented stickiness.
            self._last_histogram_size = len(self.histogram)
            self._ack_deadline = self.histogram.percentile(percent=99)
        return self._ack_deadline

    @abc.abstractmethod
    def ack(self, ack_id):
        """Acknowledge the message corresponding to the given ack_id."""
        raise NotImplementedError

    @abc.abstractmethod
    def modify_ack_deadline(self, ack_id, seconds):
        """Modify the ack deadline for the given ack_id."""
        raise NotImplementedError

    @abc.abstractmethod
    def open(self, callback):
        """Open a streaming pull connection and begin receiving messages.

        For each message received, the ``callback`` function is fired with
        a :class:`~.pubsub_v1.subscriber.message.Message` as its only
        argument.
        """
        raise NotImplementedError
class Histogram(object):
    """Representation of a single histogram.

    The purpose of this class is to store actual ack timing information
    in order to predict how long to renew leases.

    The default implementation uses the 99th percentile of previous ack
    times to implicitly lease messages; however, custom
    :class:`~.pubsub_v1.subscriber.consumer.base.BaseConsumer` subclasses
    are free to use a different formula.

    The precision of data stored is to the nearest integer. Additionally,
    values outside the range of ``10 <= x <= 600`` are stored as ``10`` or
    ``600``, since these are the boundaries of leases in the actual API.
    """
    def __init__(self, data=None):
        """Instantiate the histogram.

        Args:
            data (dict): Optional: the mapping used for the underlying
                storage; it maps each observed value to the number of
                times that value was added. A dictionary-like object may
                be supplied instead of a plain dict if required (for
                example, a multiprocessing proxy, for concurrency
                reasons).
        """
        # BUG FIX: the original implementation relied on dictionary keys
        # being implicitly sorted by value. Python dicts are
        # insertion-ordered (or arbitrary on older interpreters), never
        # key-sorted, so ``max``/``min``/``percentile`` gave wrong answers
        # whenever values arrived out of order. All key traversals below
        # now sort explicitly, removing any ordering requirement on
        # ``data``.
        if data is None:
            data = {}
        self._data = data
        # Total number of observations, cached so ``len()`` is O(1)
        # instead of summing the bucket counts on every call.
        self._len = 0

    def __len__(self):
        """Return the total number of data points in this histogram.

        Returns:
            int: The total number of data points in this histogram.
        """
        return self._len

    def __contains__(self, needle):
        """Return True if needle is present in the histogram.

        Returns:
            bool: True or False
        """
        return needle in self._data

    def __repr__(self):
        # BUG FIX: the format string was empty (``''.format(...)``), so
        # repr() always returned the empty string.
        return '<Histogram: min={min}, max={max}, len={len}>'.format(
            len=len(self),
            max=self.max,
            min=self.min,
        )

    @property
    def max(self):
        """Return the maximum value in this histogram.

        If there are no values in the histogram at all, return 600.

        Returns:
            int: The maximum value in the histogram.
        """
        if not self._data:
            return 600
        return max(self._data.keys())

    @property
    def min(self):
        """Return the minimum value in this histogram.

        If there are no values in the histogram at all, return 10.

        Returns:
            int: The minimum value in the histogram.
        """
        if not self._data:
            return 10
        return min(self._data.keys())

    def add(self, value):
        """Add the value to this histogram.

        Args:
            value (int): The value. Values outside of ``10 <= x <= 600``
                will be raised to ``10`` or reduced to ``600``.
        """
        # If the value is out of bounds, bring it in bounds.
        value = int(value)
        if value < 10:
            value = 10
        elif value > 600:
            value = 600

        # Add the value to the histogram's data dictionary.
        self._data.setdefault(value, 0)
        self._data[value] += 1
        self._len += 1

    def percentile(self, percent):
        """Return the value that is the Nth percentile in the histogram.

        Args:
            percent (int|float): The percentile being sought. The default
                consumer implementations consistently use ``99``.

        Returns:
            int: The value corresponding to the requested percentile
                (10 if the histogram is empty).
        """
        # Sanity check: Any value over 100 should become 100.
        if percent >= 100:
            percent = 100

        # The number of observations allowed to lie strictly above the
        # answer.
        target = len(self) - len(self) * (percent / 100)

        # Walk the observed values from largest to smallest, consuming
        # the target; the value at which it is exhausted is the requested
        # percentile.
        for value in sorted(self._data.keys(), reverse=True):
            target -= self._data[value]
            if target <= 0:
                # BUG FIX: return the value itself; the original returned
                # ``self._data[k]``, which is the bucket *count*.
                return value

        # The only way to get here is if there was no data.
        # In this case, just return 10 seconds.
        return 10
class Message(object):
    """A representation of a single Pub/Sub message.

    The common way to interact with
    :class:`~.pubsub_v1.subscriber.message.Message` objects is to receive
    them in callbacks on subscriptions; most users should never have a need
    to instantiate them by hand. (The exception to this is if you are
    implementing a custom subclass to
    :class:`~.pubsub_v1.subscriber.consumer.BaseConsumer`.)

    .. note::
        Messages in Google Cloud Pub/Sub are opaque blobs of bytes. This
        means that the ``data`` attribute will consistently be a
        :class:`bytes` object. If you want a text string, you should
        use :meth:`bytes.decode`.

    Attributes:
        message_id (str): The message ID. In general, you should not need
            to use this directly.
    """
    def __init__(self, consumer, ack_id, message):
        """Construct the Message.

        Args:
            consumer (~.pubsub_v1.subscriber.consumer.BaseConsumer): The
                consumer which originally received this message.
            ack_id (str): The ack_id received from Pub/Sub.
            message (~.pubsub_v1.types.PubsubMessage): The message received
                from Pub/Sub.
        """
        self._consumer = consumer
        self._ack_id = ack_id
        self._message = message
        self.message_id = message.message_id

        # BUG FIX: ``data``, ``attributes`` and ``publish_time`` are
        # read-only properties on this class; the original also assigned
        # same-named instance attributes here, which raises
        # AttributeError (a property with no setter cannot be assigned).
        # They are now exposed exclusively through the properties below.

        # The instantiation time is the time that this message
        # was received. Tracking this provides us a way to be smart about
        # the default lease deadline.
        self._received_timestamp = time.time()

    def __repr__(self):
        # BUG FIX: the original read a nonexistent ``self._data``
        # attribute, tested ``len(answer)`` before ``answer`` was bound
        # (NameError), and fell off the end returning None.
        abbv_data = self.data
        if len(abbv_data) > 50:
            abbv_data = abbv_data[:50] + b'...'

        # Return a useful representation.
        answer = 'Message {\n'
        answer += '  data: {0!r}\n'.format(abbv_data)
        answer += '  attributes: {0!r}\n'.format(self.attributes)
        answer += '}'
        return answer

    @property
    def attributes(self):
        """Return the attributes of the underlying Pub/Sub Message.

        Returns:
            dict: The message's attributes.
        """
        return self._message.attributes

    @property
    def data(self):
        """Return the data for the underlying Pub/Sub Message.

        Returns:
            bytes: The message data. This is always a bytestring; if you
                want a text string, call :meth:`bytes.decode`.
        """
        return self._message.data

    @property
    def publish_time(self):
        """Return the time that the message was originally published.

        Returns:
            datetime: The date and time that the message was published.
        """
        return self._message.publish_time

    def ack(self):
        """Acknowledge the given message.

        Acknowledging a message in Pub/Sub means that you are done
        with it, and it will not be delivered to this subscription again.
        You should avoid acknowledging messages until you have
        *finished* processing them, so that in the event of a failure,
        you receive the message again.

        .. warning::
            Acks in Pub/Sub are best effort. You should always
            ensure that your processing code is idempotent, as you may
            receive any given message more than once.
        """
        # Record how long this message took to ack so future lease
        # deadlines can be predicted from real timing data.
        time_to_ack = math.ceil(time.time() - self._received_timestamp)
        self._consumer.histogram.add(time_to_ack)
        self._consumer.ack(self._ack_id)

    def modify_ack_deadline(self, seconds):
        """Set the deadline for acknowledgement to the given value.

        The default implementation handles this for you; you should not
        need to manually deal with setting ack deadlines. The exception
        case is if you are implementing your own custom subclass of
        :class:`~.pubsub_v1.subscriber.consumer.BaseConsumer`.

        .. note::
            This is not an extension; it *sets* the deadline to the given
            number of seconds from right now. It is even possible to use
            this method to make a deadline shorter.

        Args:
            seconds (int): The number of seconds to set the lease deadline
                to. This should be between 0 and 600. Due to network
                latency, values below 10 are advised against.
        """
        self._consumer.modify_ack_deadline(self._ack_id, seconds)

    def nack(self):
        """Decline to acknowledge the given message.

        This will cause the message to be re-delivered to the
        subscription.
        """
        self.modify_ack_deadline(seconds=0)
subscriptions. + settings. Use this to prevent situations where you are + inundated with too many messages at once. consumer_class (class): A class that describes how to handle subscriptions. You may subclass the :class:`.pubsub_v1.subscriber.consumer.base.BaseConsumer` @@ -65,7 +66,7 @@ def __init__(self, flow_control=(), consumer_class=mp.Consumer, # messages. self._consumer_class = consumer_class - def subscribe(self, topic, name, callback=None, flow_control=()): + def subscribe(self, subscription, callback=None): """Return a representation of an individual subscription. This method creates and returns a ``Consumer`` object (that is, a @@ -83,12 +84,21 @@ def subscribe(self, topic, name, callback=None, flow_control=()): already created the subscription manually in the API. Args: - topic (str): The topic being subscribed to. - name (str): The name of the subscription. + subscription (str): The name of the subscription. The + subscription should have already been created (for example, + by using :meth:`create_subscription`). callback (function): The callback function. This function receives the :class:`~.pubsub_v1.types.PubsubMessage` as its only argument. flow_control (~.pubsub_v1.types.FlowControl): The flow control settings. Use this to prevent situations where you are inundated with too many messages at once. + + Returns: + ~.pubsub_v1.subscriber.consumer.base.BaseConsumer: An instance + of the defined ``consumer_class`` on the client. 
# BUG FIX: ``base`` was never imported, yet this class must inherit from
# BaseConsumer (see below).
from google.cloud.pubsub_v1.subscriber.consumer import base


class Consumer(base.BaseConsumer):
    """A consumer class based on :class:`multiprocessing.Process`.

    This consumer handles the connection to the Pub/Sub service and all of
    the concurrency needs.
    """
    def __init__(self, client, subscription):
        # Create a manager for keeping track of state shared between this
        # process and the worker process.
        self._manager = multiprocessing.Manager()
        self._shared = self._manager.Namespace()
        self._shared.outgoing_requests = self._manager.list()
        self._shared.histogram_data = self._manager.dict()

        # BUG FIX: the class previously subclassed ``object``, so this
        # super() call (and the inherited ``ack_deadline`` machinery)
        # could not work — ``object.__init__`` rejects these arguments.
        super(Consumer, self).__init__(
            client, subscription,
            histogram_data=self._shared.histogram_data,
        )

        # Keep track of the worker process servicing the gRPC connection.
        self._process = None

    def ack(self, ack_id):
        """Acknowledge the message corresponding to the given ack_id."""
        self._shared.outgoing_requests.append(types.StreamingPullRequest(
            ack_ids=[ack_id],
        ))

    def modify_ack_deadline(self, ack_id, seconds):
        """Modify the ack deadline for the given ack_id."""
        self._shared.outgoing_requests.append(types.StreamingPullRequest(
            modify_deadline_ack_ids=[ack_id],
            modify_deadline_seconds=[seconds],
        ))

    def open(self, callback):
        """Open a streaming pull connection and begin receiving messages.

        For each message received, the ``callback`` function is fired with
        a :class:`~.pubsub_v1.subscriber.message.Message` as its only
        argument.

        Raises:
            ~.pubsub_v1.subscriber.exceptions.AlreadyOpen: If this
                consumer already has an open connection.
        """
        # BUG FIX: the attribute tracking the worker was renamed to
        # ``_process``, but this check still read the old
        # ``self._connection`` name, raising AttributeError.
        if self._process is not None:
            raise exceptions.AlreadyOpen(self._subscription)

        # The streaming connection expects a series of
        # StreamingPullRequest objects. The first one must specify the
        # subscription and the ack deadline; prepend this to the list.
        self._shared.outgoing_requests.insert(0, types.StreamingPullRequest(
            stream_ack_deadline_seconds=self.ack_deadline,
            subscription=self._subscription,
        ))
class AlreadyOpen(RuntimeError):
    """Raised when opening a consumer whose connection is already open.

    The offending subscription name is carried as the exception argument.
    """
- self._data = {} + if data is None: + data = {} + self._data = data self._len = 0 def __len__(self): diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/message.py b/pubsub/google/cloud/pubsub_v1/subscriber/message.py index 73b650973214..1433a5df6805 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/message.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/message.py @@ -54,6 +54,7 @@ def __init__(self, consumer, ack_id, message): """ self._consumer = consumer self._ack_id = ack_id + self._message = message self.message_id = message.message_id self.data = message.data self.attributes = message.attributes @@ -76,17 +77,45 @@ def __repr__(self): answer += ' attributes: {0!r}\n'.format(self.attributes) answer += '}' + @property + def attributes(self): + """Return the attributes of the underlying Pub/Sub Message. + + Returns: + dict: The message's attributes. + """ + return self._message.attributes + + @property + def data(self): + """Return the data for the underlying Pub/Sub Message. + + Returns: + bytes: The message data. This is always a bytestring; if you + want a text string, call :meth:`bytes.decode`. + """ + return self._message.data + + @property + def publish_time(self): + """Return the time that the message was originally published. + + Returns: + datetime: The date and time that the message was published. + """ + return self._message.publish_time + def ack(self): """Acknowledge the given message. - .. note:: - Acknowledging a message in Pub/Sub means that you are done - with it, and it will not be delivered to this subscription again. - You should avoid acknowledging messages until you have - *finished* processing them, so that in the event of a failure, - you receive the message again. + Acknowledging a message in Pub/Sub means that you are done + with it, and it will not be delivered to this subscription again. 
+ You should avoid acknowledging messages until you have + *finished* processing them, so that in the event of a failure, + you receive the message again. - Additionally, acks in Pub/Sub are best effort. You should always + .. warning:: + Acks in Pub/Sub are best effort. You should always ensure that your processing code is idempotent, as you may receive any given message more than once. """ @@ -97,14 +126,20 @@ def ack(self): def modify_ack_deadline(self, seconds): """Set the deadline for acknowledgement to the given value. - This is not an extension; it *sets* the deadline to the given number - of seconds from right now. It is even possible to use this method to - make a deadline shorter. - The default implementation handles this for you; you should not need to manually deal with setting ack deadlines. The exception case is if you are implementing your own custom subclass of :class:`~.pubsub_v1.subcriber.consumer.BaseConsumer`. + + .. note:: + This is not an extension; it *sets* the deadline to the given + number of seconds from right now. It is even possible to use this + method to make a deadline shorter. + + Args: + seconds (int): The number of seconds to set the lease deadline + to. This should be between 0 and 600. Due to network latency, + values below 10 are advised against. 
""" self._consumer.modify_ack_deadline(self._ack_id, seconds) From 890de3ad93f4d8bcafb9c5178fec17d26ba66544 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Mon, 5 Jun 2017 14:59:36 -0700 Subject: [PATCH 05/63] wip --- .../cloud/pubsub_v1/subscriber/consumer/mp.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/consumer/mp.py b/pubsub/google/cloud/pubsub_v1/subscriber/consumer/mp.py index 5a3e57e93deb..a73119a0c69e 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/consumer/mp.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/consumer/mp.py @@ -39,19 +39,18 @@ def __init__(self, client, subscription): # Call the superclass constructor. super(Consumer, self).__init__(client, subscription, - self._shared.histogram_data) + histogram_data=self._shared.histogram_data, + ) # Keep track of the GRPC connection. - self._connection = None + self._process = None - @abc.abstractmethod def ack(self, ack_id): """Acknowledge the message corresponding to the given ack_id.""" self._shared.outgoing_requests.append(types.StreamingPullRequest( ack_ids=[ack_id], )) - @abc.abstractmethod def modify_ack_deadline(self, ack_id, seconds): """Modify the ack deadline for the given ack_id.""" self._shared.outgoing_requests.append(types.StreamingPullRequest( @@ -59,7 +58,6 @@ def modify_ack_deadline(self, ack_id, seconds): modify_deadline_seconds=[seconds], )) - @abc.abstractmethod def open(self, callback): """Open a streaming pull connection and begin receiving messages. @@ -78,3 +76,10 @@ def open(self, callback): stream_ack_deadline_seconds=self.ack_deadline, subscription=self._subscription, )) + + # Open the request. 
+ self._process = multiprocessing.Process(self.stream) + self._process.start() + + def stream(self): + """Stream data to and from the Cloud Pub/Sub service.""" From 12ace0ecf754fb632f46b3a2e41ca04b7de17483 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Wed, 7 Jun 2017 07:33:56 -0700 Subject: [PATCH 06/63] wip --- .../cloud/pubsub_v1/subscriber/__init__.py | 2 +- .../cloud/pubsub_v1/subscriber/client.py | 2 +- .../pubsub_v1/subscriber/consumer/base.py | 9 +++++ .../cloud/pubsub_v1/subscriber/consumer/mp.py | 40 +++++++++++++++---- 4 files changed, 44 insertions(+), 9 deletions(-) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/__init__.py b/pubsub/google/cloud/pubsub_v1/subscriber/__init__.py index 8a122da149d1..ee2aaca57ef0 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/__init__.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import absolute_literals +from __future__ import absolute_import from google.cloud.pubsub_v1.subscriber.client import SubscriberClient diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/client.py b/pubsub/google/cloud/pubsub_v1/subscriber/client.py index a17621b1510e..c2bf71c46f78 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/client.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/client.py @@ -30,7 +30,7 @@ @_gapic.add_methods(subscriber_client.SubscriberClient, - blacklist=('pull', 'streaming_pull')): + blacklist=('pull', 'streaming_pull')) class SubscriberClient(object): """A subscriber client for Google Cloud Pub/Sub. 
diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/consumer/base.py b/pubsub/google/cloud/pubsub_v1/subscriber/consumer/base.py index 4e828107c128..2b48b8a08578 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/consumer/base.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/consumer/base.py @@ -74,6 +74,15 @@ def ack_deadline(self): self._ack_deadline = self.histogram.percentile(percent=99) return self._ack_deadline + @property + def subscription(self): + """Return the subscription. + + Returns: + str: The subscription + """ + return self._subscription + @abc.abstractmethod def ack(self, ack_id): """Acknowledge the message corresponding to the given ack_id.""" diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/consumer/mp.py b/pubsub/google/cloud/pubsub_v1/subscriber/consumer/mp.py index a73119a0c69e..42dd41ec22ef 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/consumer/mp.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/consumer/mp.py @@ -34,6 +34,7 @@ def __init__(self, client, subscription): # Create a manager for keeping track of shared state. self._manager = multiprocessing.Manager() self._shared = self._manager.Namespace() + self._shared.subscription = subscription self._shared.outgoing_requests = self._manager.list() self._shared.histogram_data = self._manager.dict() @@ -45,12 +46,26 @@ def __init__(self, client, subscription): # Keep track of the GRPC connection. self._process = None + @property + def subscription(self): + """Return the subscription. 
+ + Returns: + str: The subscription + """ + return self._shared.subscription + def ack(self, ack_id): """Acknowledge the message corresponding to the given ack_id.""" self._shared.outgoing_requests.append(types.StreamingPullRequest( ack_ids=[ack_id], )) + def close(self): + """Close the existing connection.""" + self._process.terminate() + self._process = None + def modify_ack_deadline(self, ack_id, seconds): """Modify the ack deadline for the given ack_id.""" self._shared.outgoing_requests.append(types.StreamingPullRequest( @@ -64,11 +79,22 @@ def open(self, callback): For each message received, the ``callback`` function is fired with a :class:`~.pubsub_v1.subscriber.message.Message` as its only argument. + + Args: + callback (function): The callback function. """ # Sanity check: If the connection is already open, fail. - if self._connection is not None: + if self._process is not None: raise exceptions.AlreadyOpen(self._subscription) + # Open the request. + self._process = multiprocessing.Process(self.stream) + self._process.daemon = True + self._process.start() + + def stream(self): + """Stream data to and from the Cloud Pub/Sub service.""" + # The streaming connection expects a series of StreamingPullRequest # objects. The first one must specify the subscription and the # ack deadline; prepend this to the list. @@ -77,9 +103,9 @@ def open(self, callback): subscription=self._subscription, )) - # Open the request. 
- self._process = multiprocessing.Process(self.stream) - self._process.start() - - def stream(self): - """Stream data to and from the Cloud Pub/Sub service.""" + import sys + try: + for r in self.api.streaming_pull(self._shared.outgoing_requests): + print(r, file=sys.stderr) + except GaxError: + return self.stream() From 73050002bfe6946641f4c3af4ddb271227238de7 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Wed, 14 Jun 2017 13:34:48 -0700 Subject: [PATCH 07/63] wip --- pubsub/google/cloud/pubsub_v1/publisher/client.py | 1 + .../google/cloud/pubsub_v1/subscriber/consumer/mp.py | 10 ++++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/pubsub/google/cloud/pubsub_v1/publisher/client.py b/pubsub/google/cloud/pubsub_v1/publisher/client.py index 0e815395d74c..4941326c02a5 100644 --- a/pubsub/google/cloud/pubsub_v1/publisher/client.py +++ b/pubsub/google/cloud/pubsub_v1/publisher/client.py @@ -14,6 +14,7 @@ from __future__ import absolute_import +import copy import functools import pkg_resources diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/consumer/mp.py b/pubsub/google/cloud/pubsub_v1/subscriber/consumer/mp.py index 42dd41ec22ef..a1bdc6d2cc4e 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/consumer/mp.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/consumer/mp.py @@ -19,12 +19,15 @@ import six +from google.gax.errors import GaxError + from google.cloud.pubsub_v1 import types from google.cloud.pubsub_v1.subscriber import exceptions from google.cloud.pubsub_v1.subscriber import histogram +from google.cloud.pubsub_v1.subscriber.consumer import base -class Consumer(object): +class Consumer(base.BaseConsumer): """A consumer class based on :class:``multiprocessing.Process``. 
This consumer handles the connection to the Pub/Sub service and all of @@ -105,7 +108,10 @@ def stream(self): import sys try: - for r in self.api.streaming_pull(self._shared.outgoing_requests): + outgoing = iter(self._shared.outgoing_requests) + import pdb ; pdb.set_trace() + for r in self._client.api.streaming_pull(outgoing): + import pdb ; pdb.set_trace() print(r, file=sys.stderr) except GaxError: return self.stream() From e5a27ae6fd1c4aa337b4931b03c135961112ae82 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Tue, 27 Jun 2017 08:12:43 -0700 Subject: [PATCH 08/63] Fix a couple minor lint issues. --- pubsub/google/cloud/pubsub_v1/publisher/batch/mp.py | 3 +-- pubsub/google/cloud/pubsub_v1/publisher/exceptions.py | 4 ++++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/pubsub/google/cloud/pubsub_v1/publisher/batch/mp.py b/pubsub/google/cloud/pubsub_v1/publisher/batch/mp.py index ef5dfcfb11a4..de6b9005c79b 100644 --- a/pubsub/google/cloud/pubsub_v1/publisher/batch/mp.py +++ b/pubsub/google/cloud/pubsub_v1/publisher/batch/mp.py @@ -16,7 +16,6 @@ import copy import multiprocessing -import queue import time import uuid @@ -332,7 +331,7 @@ def exception(self, timeout=None, _wait=1): # If the timeout has been exceeded, raise TimeoutError. if timeout and timeout < 0: - raise TimeoutError('Timed out waiting for an exception.') + raise exceptions.TimeoutError('Timed out waiting for exception.') # Wait a little while and try again. 
time.sleep(_wait) diff --git a/pubsub/google/cloud/pubsub_v1/publisher/exceptions.py b/pubsub/google/cloud/pubsub_v1/publisher/exceptions.py index e37993b24035..bedc5d5a2a48 100644 --- a/pubsub/google/cloud/pubsub_v1/publisher/exceptions.py +++ b/pubsub/google/cloud/pubsub_v1/publisher/exceptions.py @@ -14,3 +14,7 @@ class PublishError(RuntimeError): pass + + +class TimeoutError(RuntimeError): + pass From d50a22e3667c777cc2647d257189ea7839ede6a0 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Tue, 27 Jun 2017 09:56:50 -0700 Subject: [PATCH 09/63] Adapting a subscriber that will work. --- pubsub/.flake8 | 6 + .../cloud/pubsub_v1/subscriber/bidi_stream.py | 259 ++++++++++++++++++ .../cloud/pubsub_v1/subscriber/client.py | 4 - .../pubsub_v1/subscriber/consumer/base.py | 83 +++++- .../cloud/pubsub_v1/subscriber/consumer/mp.py | 28 +- .../pubsub_v1/subscriber/helper_threads.py | 121 ++++++++ .../cloud/pubsub_v1/subscriber/message.py | 7 +- 7 files changed, 470 insertions(+), 38 deletions(-) create mode 100644 pubsub/google/cloud/pubsub_v1/subscriber/bidi_stream.py create mode 100644 pubsub/google/cloud/pubsub_v1/subscriber/helper_threads.py diff --git a/pubsub/.flake8 b/pubsub/.flake8 index 25168dc87605..712bd8afe7f4 100644 --- a/pubsub/.flake8 +++ b/pubsub/.flake8 @@ -4,3 +4,9 @@ exclude = .git, *.pyc, conf.py + +ignore = + # Allow "under-indented" continuation lines. + E124, + # Allow closing parentheses to column-match the opening call. + E128 diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/bidi_stream.py b/pubsub/google/cloud/pubsub_v1/subscriber/bidi_stream.py new file mode 100644 index 000000000000..c0b844ad2cb3 --- /dev/null +++ b/pubsub/google/cloud/pubsub_v1/subscriber/bidi_stream.py @@ -0,0 +1,259 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Bidirectional Streaming Consumer. +The goal here is to consume a bidirectional streaming RPC by fanning out the +responses received from the server to be processed and fanning in requests from +the response processors to be sent to the server through the request stream. +This module is a framework to deal with this pattern in a consistent way: + + * A :class:`Consumer` manages scheduling requests to a stream and consuming + responses from a stream. The Consumer takes the responses and schedules + them to be processed in callbacks using any + :class:`~concurrent.futures.Executor`. + * A :class:`Policy` which determines how the consumer calls the RPC and + processes responses, errors, and messages. + +The :class:`Policy` is the only class that's intended to be sub-classed here. +This would be implemented for every bidirectional streaming method. +How does this work? The first part of the implementation, fanning out +responses, its actually quite straightforward and can be done with just a +:class:`concurrent.futures.Executor`: + +.. graphviz:: + digraph responses_only { + "gRPC C Core" -> "gRPC Python" [label="queue", dir="both"] + "gRPC Python" -> "Consumer" [label="responses", color="red"] + "Consumer" -> "Policy" [label="responses", color="red"] + "Policy" -> "futures.Executor" [label="response", color="red"] + "futures.Executor" -> "callback" [label="response", color="red"] + } + +The challenge comes from the fact that in bidirectional streaming two more +things have to be done: + + 1. The consumer must maintain a long-running request generator. 
+ 2. The consumer must provide some way for the response processor to queue + new requests. + +These are especially important because in the case of Pub/Sub you are +essentially streaming requests indefinitely and receiving responses +indefinitely. + +For the first challenge, we take advantage of the fact that gRPC runs the +request generator in its own thread. That thread can block, so we can use +a queue for that: + +.. graphviz:: + digraph response_flow { + "gRPC C Core" -> "gRPC Python" [label="queue", dir="both"] + "gRPC Python" -> "Consumer" [label="responses", color="red"] + "Consumer" -> "request generator thread" [label="starts", color="gray"] + "request generator thread" -> "gRPC Python" + [label="requests", color="blue"] + } + +The final piece of the puzzle, allowing things from anywhere to queue new +requests, it a bit more complex. If we were only dealing with threads, then the +response workers could just directly interact with the policy/consumer to +queue new requests: + +.. graphviz:: + digraph thread_only_requests { + "gRPC C Core" -> "gRPC Python" [label="queue", dir="both"] + "gRPC Python" -> "Consumer" [label="responses", color="red"] + "Consumer" -> "request generator thread" [label="starts", color="gray"] + "request generator thread" -> "gRPC Python" + [label="requests", color="blue"] + "Consumer" -> "Policy" [label="responses", color="red"] + "Policy" -> "futures.Executor" [label="response", color="red"] + "futures.Executor" -> "callback" [label="response", color="red"] + "callback" -> "Consumer" [label="send_request", color="blue"] + } + +But, because this does not dictate any particular concurrent strategy for +dealing with the responses, it's possible that a response could be processed +in a different thread, process, or even on a different machine. Because of +this, we need an intermediary queue between the callbacks and the gRPC request +queue to bridge the "concurrecy gap". 
To pump items from the concurrecy-safe +queue into the gRPC request queue, we need another worker thread. Putting this +all together looks like this: + +.. graphviz:: + digraph responses_only { + "gRPC C Core" -> "gRPC Python" [label="queue", dir="both"] + "gRPC Python" -> "Consumer" [label="responses", color="red"] + "Consumer" -> "request generator thread" [label="starts", color="gray"] + "Policy" -> "QueueCallbackThread" [label="starts", color="gray"] + "request generator thread" -> "gRPC Python" + [label="requests", color="blue"] + "Consumer" -> "Policy" [label="responses", color="red"] + "Policy" -> "futures.Executor" [label="response", color="red"] + "futures.Executor" -> "callback" [label="response", color="red"] + "callback" -> "callback_request_queue" [label="requests", color="blue"] + "callback_request_queue" -> "QueueCallbackThread" + [label="consumed by", color="blue"] + "QueueCallbackThread" -> "Consumer" + [label="send_response", color="blue"] + } + +This part is actually up to the Policy to enable. The consumer just provides a +thread-safe queue for requests. The :cls:`QueueCallbackThread` can be used by +the Policy implementation to spin up the worker thread to pump the +concurrency-safe queue. See the Pub/Sub subscriber implementation for an +example of this. +""" + +import logging +import queue +import threading + +from google.cloud.pubsub_v1.subscriber import helper_threads + +_LOGGER = logging.getLogger(__name__) + + +class BidiStream(object): + """Bi-directional streaming RPC consumer. + + This class coordinates the consumption of a bi-directional streaming RPC. + There is a bit of background information to know before understanding how + this class operates: + + 1. gRPC has its own background thread for dealing with I/O. + 2. gRPC consumes a streaming call's request generator in another + thread. + 3. If the request generator thread exits, gRPC will close the + connection. 
+ + Because of (2) and (3), the consumer must always at least use threading + for some bookkeeping. No matter what, a thread will be created by gRPC to + generate requests. This thread is called the *request generator thread*. + Having the request generator thread allows the consumer to hold the stream + open indefinitely. Now gRPC will send responses as fast as the consumer can + ask for them. The consumer hands these off to the :cls:`Policy` via + :meth:`Policy.on_response`, which should not block. + + Finally, we do not want to block the main thread, so the consumer actually + invokes the RPC itself in a separate thread. This thread is called the + *response consumer helper thread*. + + So all in all there are three threads: + + 1. gRPC's internal I/O thread. + 2. The request generator thread, created by gRPC. + 3. The response consumer helper thread, created by the Consumer. + + In addition, the Consumer likely uses some sort of concurreny to prevent + blocking on processing responses. The Policy may also use another thread to + deal with pumping messages from an external queue into the request queue + here. + + It may seem strange to use threads for something "high performance" + considering the GIL. However, the threads here are not CPU bound. They are + simple threads that are blocked by I/O and generally just move around some + simple objects between queues. The overhead for these helper threads is + low. The Consumer and end-user can configure any sort of executor they want + for the actual processing of the responses, which may be CPU intensive. + """ + + def __init__(self, consumer): + """ + Args: + consumer (Consumer): The consumer. + """ + self._consumer = consumer + self._request_queue = queue.Queue() + self._exiting = threading.Event() + + self.helper_threads = helper_threads.HelperThreadRegistry() + """:cls:`_helper_threads.HelperThreads`: manages the helper threads. + The policy may use this to schedule its own helper threads. 
+ """ + + def send_request(self, request): + """Queue a request to be sent to gRPC. + Args: + request (Any): The request protobuf. + """ + self._request_queue.put(request) + + def _request_generator_thread(self): + """Generate requests for the stream. + + This blocks for new requests on the request queue and yields them to + gRPC. + """ + # Note: gRPC will run this in a separate thread. This can and must + # block to keep the stream open. + initial_request = self._consumer.on_initial_request() + if initial_request is not None: + _LOGGER.debug( + 'Sending initial request: {}'.format(initial_request), + ) + yield initial_request + + while True: + request = self._request_queue.get() + if request == helper_threads.STOP: + _LOGGER.debug('Request generator signaled to stop.') + break + + _LOGGER.debug('Sending request: {}'.format(request)) + yield request + + def _blocking_consume(self): + """Consume the stream indefinitely.""" + while True: + # It is possible that a timeout can cause the stream to not + # exit cleanly when the user has called stop_consuming(). This + # checks to make sure we're not exiting before opening a new + # stream. + if self._exiting.is_set(): + _LOGGER.debug('Event signalled consumer exit.') + break + + request_generator = self._request_generator_thread() + response_generator = self._consumer.call_rpc(request_generator) + try: + for response in response_generator: + self._policy.on_response(response) + + # If the loop above exits without an exception, then the + # request stream terminated cleanly, which should only happen + # when it was signaled to do so by stop_consuming. In this + # case, break out of the while loop and exit this thread. 
+ _LOGGER.debug('Clean RPC loop exit signalled consumer exit.') + break + + except Exception as e: + self._policy.on_exception(e) + + def _consume_thread(self): + """Thread to consume the stream.""" + self._blocking_consume() + + def start_consuming(self): + """Start consuming the stream.""" + self._exiting.clear() + self.helper_threads.start('consume bidirectional stream', + self._request_queue, + self._consume_thread, + ) + self._consumer.initialize(self) + + def stop_consuming(self): + """Signal the stream to stop and block until it completes.""" + self._exiting.set() + self.helper_threads.stop_all() diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/client.py b/pubsub/google/cloud/pubsub_v1/subscriber/client.py index c2bf71c46f78..b2def0bb6409 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/client.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/client.py @@ -14,15 +14,11 @@ from __future__ import absolute_import -import functools import pkg_resources -import six - from google.cloud.gapic.pubsub.v1 import subscriber_client from google.cloud.pubsub_v1 import _gapic -from google.cloud.pubsub_v1 import types from google.cloud.pubsub_v1.subscriber.consumer import mp diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/consumer/base.py b/pubsub/google/cloud/pubsub_v1/subscriber/consumer/base.py index 2b48b8a08578..633b96ed3bf2 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/consumer/base.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/consumer/base.py @@ -18,12 +18,14 @@ import six +from google.cloud.pubsub_v1 import types +from google.cloud.pubsub_v1.subscriber import bidi_stream from google.cloud.pubsub_v1.subscriber import histogram @six.add_metaclass(abc.ABCMeta) class BaseConsumer(object): - """Abstract base class for consumers. + """Abstract class defining a subscription consumer. 
Although the :class:`~.pubsub_v1.subscriber.consumer.mp.Consumer` class, based on :class:`multiprocessing.Process`, is fine for most cases, @@ -40,8 +42,9 @@ def __init__(self, client, subscription, histogram_data=None): Args: client (~.pubsub_v1.subscriber.client): The subscriber client used to create this instance. - subscription (str): The name of the subscription. The canonical format - for this is ``projects/{project}/subscriptions/{subscription}``. + subscription (str): The name of the subscription. The canonical + format for this is + ``projects/{project}/subscriptions/{subscription}``. histogram_data (dict): Optional: A structure to store the histogram data for predicting appropriate ack times. If set, this should be a dictionary-like object. @@ -55,6 +58,7 @@ def __init__(self, client, subscription, histogram_data=None): """ self._client = client self._subscription = subscription + self._bidi_stream = bidi_stream.BidiStream(self) self._ack_deadline = 10 self._last_histogram_size = 0 self.histogram = histogram.Histogram(data=histogram_data) @@ -74,6 +78,18 @@ def ack_deadline(self): self._ack_deadline = self.histogram.percentile(percent=99) return self._ack_deadline + @property + def initial_request(self): + """Return the initial request. + + This defines the intiial request that must always be sent to Pub/Sub + immediately upon opening the subscription. + """ + return types.StreamingPullRequest( + stream_ack_deadline_seconds=self.histogram.percentile(99), + subscription=self.subscription, + ) + @property def subscription(self): """Return the subscription. @@ -83,14 +99,65 @@ def subscription(self): """ return self._subscription - @abc.abstractmethod def ack(self, ack_id): """Acknowledge the message corresponding to the given ack_id.""" - raise NotImplementedError + request = types.StreamingPullRequest(ack_ids=[ack_id]) + self._bidi_stream.send_request(request) + + def call_rpc(self, request_generator): + """Invoke the Pub/Sub streaming pull RPC. 
+ + Args: + request_generator (Generator): A generator that yields requests, + and blocks if there are no outstanding requests (until such + time as there are). + """ + return self._client.api.streaming_pull(request_generator) - @abc.abstractmethod def modify_ack_deadline(self, ack_id, seconds): """Modify the ack deadline for the given ack_id.""" + request = types.StreamingPullRequest( + modify_deadline_ack_ids=[ack_id], + modify_deadline_seconds=[seconds], + ) + self._bidi_stream.send_request(request) + + def nack(self, ack_id): + """Explicitly deny receipt of a message.""" + return self.modify_ack_deadline(ack_id, 0) + + @abc.abstractmethod + def on_response(self, response): + """Process a response from gRPC. + + This gives the consumer control over how responses are scheduled to + be processed. This method is expected to not block and instead + schedule the response to be consumed by some sort of concurrency. + + For example, if a the Policy implementation takes a callback in its + constructor, you can schedule the callback using a + :cls:`concurrent.futures.ThreadPoolExecutor`:: + + self._pool.submit(self._callback, response) + + This is called from the response consumer helper thread. + + Args: + response (Any): The protobuf response from the RPC. + """ + raise NotImplementedError + + @abc.abstractmethod + def on_exception(self, exception): + """Called when a gRPC exception occurs. + + If this method does nothing, then the stream is re-started. If this + raises an exception, it will stop the consumer thread. + This is executed on the response consumer helper thread. + + Args: + exception (Exception): The exception raised by the RPC. + """ raise NotImplementedError @abc.abstractmethod @@ -100,5 +167,9 @@ def open(self, callback): For each message received, the ``callback`` function is fired with a :class:`~.pubsub_v1.subscriber.message.Message` as its only argument. + + Args: + callback (Callable[Message]): A callable that receives a + Pub/Sub Message. 
""" raise NotImplementedError diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/consumer/mp.py b/pubsub/google/cloud/pubsub_v1/subscriber/consumer/mp.py index a1bdc6d2cc4e..1be7b519d1f3 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/consumer/mp.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/consumer/mp.py @@ -14,16 +14,13 @@ from __future__ import absolute_import -import abc +from concurrent import futures import multiprocessing -import six - from google.gax.errors import GaxError from google.cloud.pubsub_v1 import types from google.cloud.pubsub_v1.subscriber import exceptions -from google.cloud.pubsub_v1.subscriber import histogram from google.cloud.pubsub_v1.subscriber.consumer import base @@ -38,14 +35,17 @@ def __init__(self, client, subscription): self._manager = multiprocessing.Manager() self._shared = self._manager.Namespace() self._shared.subscription = subscription - self._shared.outgoing_requests = self._manager.list() self._shared.histogram_data = self._manager.dict() + self._shared.request_queue = self._manager.Queue() # Call the superclass constructor. super(Consumer, self).__init__(client, subscription, histogram_data=self._shared.histogram_data, ) + # Also maintain a request queue and an executor. + self._executor = futures.ProcessPoolExecutor() + # Keep track of the GRPC connection. 
self._process = None @@ -58,24 +58,11 @@ def subscription(self): """ return self._shared.subscription - def ack(self, ack_id): - """Acknowledge the message corresponding to the given ack_id.""" - self._shared.outgoing_requests.append(types.StreamingPullRequest( - ack_ids=[ack_id], - )) - def close(self): """Close the existing connection.""" self._process.terminate() self._process = None - def modify_ack_deadline(self, ack_id, seconds): - """Modify the ack deadline for the given ack_id.""" - self._shared.outgoing_requests.append(types.StreamingPullRequest( - modify_deadline_ack_ids=[ack_id], - modify_deadline_seconds=[seconds], - )) - def open(self, callback): """Open a streaming pull connection and begin receiving messages. @@ -106,12 +93,7 @@ def stream(self): subscription=self._subscription, )) - import sys try: outgoing = iter(self._shared.outgoing_requests) - import pdb ; pdb.set_trace() - for r in self._client.api.streaming_pull(outgoing): - import pdb ; pdb.set_trace() - print(r, file=sys.stderr) except GaxError: return self.stream() diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/helper_threads.py b/pubsub/google/cloud/pubsub_v1/subscriber/helper_threads.py new file mode 100644 index 000000000000..07ada2a0def3 --- /dev/null +++ b/pubsub/google/cloud/pubsub_v1/subscriber/helper_threads.py @@ -0,0 +1,121 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import collections +import logging +import threading +import uuid + +import six + +__all__ = ( + 'HelperThreadRegistry', + 'QueueCallbackThread', + 'STOP', +) + +_LOGGER = logging.getLogger(__name__) + +_HelperThread = collections.namedtuple('HelperThreads', + ['name', 'thread', 'queue'], +) + + +# Helper thread stop indicator. This could be a sentinel object or None, +# but the sentinel object's ID can change if the process is forked, and +# None has the possibility of a user accidentally killing the helper +# thread. +STOP = uuid.uuid4() + + +class HelperThreadRegistry(object): + def __init__(self): + self._helper_threads = {} + + def start(self, name, queue, target, *args, **kwargs): + """Create and start a helper thread. + + Args: + name (str): The name of the helper thread. + queue (Queue): A concurrency-safe queue. + target (Callable): The target of the thread. + args: Additional args passed to the thread constructor. + kwargs: Additional kwargs passed to the thread constructor. + + Returns: + threading.Thread: The created thread. + """ + # Create and start the helper thread. + thread = threading.Thread( + name='Consumer helper: {}'.format(name), + target=target, + *args, **kwargs + ) + thread.daemon = True + thread.start() + + # Keep track of the helper thread, so we are able to stop it. + self._helper_threads[name] = _HelperThread(name, thread, queue) + _LOGGER.debug('Started helper thread {}'.format(name)) + return thread + + def stop(self, name): + """Stops a helper thread. + + Sends the stop message and blocks until the thread joins. + + Args: + name (str): The name of the thread. + """ + # Attempt to retrieve the thread; if it is gone already, no-op. + helper_thread = self._helper_threads.get(name) + if helper_thread.thread is None: + return + + # Join the thread if it is still alive. 
+ if helper_thread.thread.is_alive(): + _LOGGER.debug('Stopping helper thread {}'.format(name)) + helper_thread.queue.put(STOP) + helper_thread.thread.join() + + # Remove the thread from our tracking. + self._helper_threads.pop(name, None) + + def stop_all(self): + """Stop all helper threads.""" + # This could be more efficient by sending the stop signal to all + # threads before joining any of them. + for name in list(six.iterkeys(self._helper_threads)): + self.stop(name) + + +class QueueCallbackThread(object): + """A helper thread that executes a callback for every item in + the queue. + """ + def __init__(self, queue, callback): + self.queue = queue + self._callback = callback + + def __call__(self): + while True: + item = self.queue.get() + if item == STOP: + break + + # This doesn't presently deal with exceptions that bubble up + # through the callback. If there is an error here, the thread will + # exit and no further queue items will be processed. We could + # potentially capture errors, log them, and then continue on. + self._callback(item) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/message.py b/pubsub/google/cloud/pubsub_v1/subscriber/message.py index 1433a5df6805..26b27ea5c4cb 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/message.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/message.py @@ -56,9 +56,6 @@ def __init__(self, consumer, ack_id, message): self._ack_id = ack_id self._message = message self.message_id = message.message_id - self.data = message.data - self.attributes = message.attributes - self.publish_time = message.publish_time # The instantiation time is the time that this message # was received. Tracking this provides us a way to be smart about @@ -68,11 +65,11 @@ def __init__(self, consumer, ack_id, message): def __repr__(self): # Get an abbreviated version of the data. abbv_data = self._data - if len(answer) > 50: + if len(abbv_data) > 50: abbv_data = abbv_data[0:50] + b'...' # Return a useful representation. 
- answer = 'Message {\n' + answer = 'Message {\n' answer += ' data: {0!r}\n'.format(abbv_data) answer += ' attributes: {0!r}\n'.format(self.attributes) answer += '}' From 14f200af5fd4a3f5052b5cc3a8b8257828fd78fe Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Tue, 27 Jun 2017 14:51:40 -0700 Subject: [PATCH 10/63] WIP --- .../{bidi_stream.py => consumer.py} | 17 ++--- .../cloud/pubsub_v1/subscriber/message.py | 21 ++++--- .../{consumer => policy}/__init__.py | 0 .../subscriber/{consumer => policy}/base.py | 20 +++--- .../subscriber/{consumer => policy}/mp.py | 63 +++++++++---------- 5 files changed, 65 insertions(+), 56 deletions(-) rename pubsub/google/cloud/pubsub_v1/subscriber/{bidi_stream.py => consumer.py} (96%) rename pubsub/google/cloud/pubsub_v1/subscriber/{consumer => policy}/__init__.py (100%) rename pubsub/google/cloud/pubsub_v1/subscriber/{consumer => policy}/base.py (91%) rename pubsub/google/cloud/pubsub_v1/subscriber/{consumer => policy}/mp.py (62%) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/bidi_stream.py b/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py similarity index 96% rename from pubsub/google/cloud/pubsub_v1/subscriber/bidi_stream.py rename to pubsub/google/cloud/pubsub_v1/subscriber/consumer.py index c0b844ad2cb3..eb74e7300a5d 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/bidi_stream.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py @@ -13,6 +13,7 @@ # limitations under the License. """Bidirectional Streaming Consumer. + The goal here is to consume a bidirectional streaming RPC by fanning out the responses received from the server to be processed and fanning in requests from the response processors to be sent to the server through the request stream. @@ -124,7 +125,7 @@ _LOGGER = logging.getLogger(__name__) -class BidiStream(object): +class Consumer(object): """Bi-directional streaming RPC consumer. This class coordinates the consumption of a bi-directional streaming RPC. 
@@ -168,12 +169,13 @@ class BidiStream(object): for the actual processing of the responses, which may be CPU intensive. """ - def __init__(self, consumer): + def __init__(self, policy): """ Args: - consumer (Consumer): The consumer. + policy (Consumer): The consumer policy, which defines how + requests and responses are handled. """ - self._consumer = consumer + self._policy = policy self._request_queue = queue.Queue() self._exiting = threading.Event() @@ -184,6 +186,7 @@ def __init__(self, consumer): def send_request(self, request): """Queue a request to be sent to gRPC. + Args: request (Any): The request protobuf. """ @@ -197,7 +200,7 @@ def _request_generator_thread(self): """ # Note: gRPC will run this in a separate thread. This can and must # block to keep the stream open. - initial_request = self._consumer.on_initial_request() + initial_request = self._policy.on_initial_request() if initial_request is not None: _LOGGER.debug( 'Sending initial request: {}'.format(initial_request), @@ -225,7 +228,7 @@ def _blocking_consume(self): break request_generator = self._request_generator_thread() - response_generator = self._consumer.call_rpc(request_generator) + response_generator = self._policy.call_rpc(request_generator) try: for response in response_generator: self._policy.on_response(response) @@ -251,7 +254,7 @@ def start_consuming(self): self._request_queue, self._consume_thread, ) - self._consumer.initialize(self) + self._policy.initialize(self) def stop_consuming(self): """Signal the stream to stop and block until it completes.""" diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/message.py b/pubsub/google/cloud/pubsub_v1/subscriber/message.py index 26b27ea5c4cb..92c32da75edb 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/message.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/message.py @@ -42,17 +42,23 @@ class Message(object): publish_time (datetime): The time that this message was originally published. 
""" - def __init__(self, consumer, ack_id, message): + def __init__(self, policy, ack_id, message): """Construct the Message. + .. note:: + + This class should not be constructed directly; it is the + responsibility of :class:`BasePolicy` subclasses to do so. + Args: - consumer (~.pubsub_v1.subscriber.consumer.BaseConsumer): The - consumer which originally received this message. + policy (~.pubsub_v1.subscriber.policy.BasePolicy): The policy + that created this message, and understands how to handle + actions from that message (e.g. acks). ack_id (str): The ack_id received from Pub/Sub. message (~.pubsub_v1.types.PubsubMessage): The message received from Pub/Sub. """ - self._consumer = consumer + self._policy = policy self._ack_id = ack_id self._message = message self.message_id = message.message_id @@ -112,13 +118,14 @@ def ack(self): you receive the message again. .. warning:: + Acks in Pub/Sub are best effort. You should always ensure that your processing code is idempotent, as you may receive any given message more than once. """ time_to_ack = math.ceil(time.time() - self._received_timestamp) - self._consumer.histogram.add(time_to_ack) - self._consumer.ack(self._ack_id) + self._policy.histogram.add(time_to_ack) + self._policy.ack(self._ack_id) def modify_ack_deadline(self, seconds): """Set the deadline for acknowledgement to the given value. @@ -138,7 +145,7 @@ def modify_ack_deadline(self, seconds): to. This should be between 0 and 600. Due to network latency, values below 10 are advised against. """ - self._consumer.modify_ack_deadline(self._ack_id, seconds) + self._policy.modify_ack_deadline(self._ack_id, seconds) def nack(self): """Decline to acknowldge the given message. 
diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/consumer/__init__.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/__init__.py similarity index 100% rename from pubsub/google/cloud/pubsub_v1/subscriber/consumer/__init__.py rename to pubsub/google/cloud/pubsub_v1/subscriber/policy/__init__.py diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/consumer/base.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py similarity index 91% rename from pubsub/google/cloud/pubsub_v1/subscriber/consumer/base.py rename to pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py index 633b96ed3bf2..88650e59b845 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/consumer/base.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py @@ -19,25 +19,25 @@ import six from google.cloud.pubsub_v1 import types -from google.cloud.pubsub_v1.subscriber import bidi_stream +from google.cloud.pubsub_v1.subscriber import consumer from google.cloud.pubsub_v1.subscriber import histogram @six.add_metaclass(abc.ABCMeta) -class BaseConsumer(object): - """Abstract class defining a subscription consumer. +class BasePolicy(object): + """Abstract class defining a subscription policy. - Although the :class:`~.pubsub_v1.subscriber.consumer.mp.Consumer` class, + Although the :class:`~.pubsub_v1.subscriber.policy.mp.Policy` class, based on :class:`multiprocessing.Process`, is fine for most cases, advanced users may need to implement something based on a different concurrency model. - This class defines the interface for the consumer implementation; - subclasses may be passed as the ``consumer_class`` argument to + This class defines the interface for the policy implementation; + subclasses may be passed as the ``policy_class`` argument to :class:`~.pubsub_v1.client.SubscriberClient`. """ def __init__(self, client, subscription, histogram_data=None): - """Instantiate the consumer. + """Instantiate the policy. 
Args: client (~.pubsub_v1.subscriber.client): The subscriber client used @@ -58,7 +58,7 @@ def __init__(self, client, subscription, histogram_data=None): """ self._client = client self._subscription = subscription - self._bidi_stream = bidi_stream.BidiStream(self) + self._consumer = consumer.Consumer(self) self._ack_deadline = 10 self._last_histogram_size = 0 self.histogram = histogram.Histogram(data=histogram_data) @@ -102,7 +102,7 @@ def subscription(self): def ack(self, ack_id): """Acknowledge the message corresponding to the given ack_id.""" request = types.StreamingPullRequest(ack_ids=[ack_id]) - self._bidi_stream.send_request(request) + self._consumer.send_request(request) def call_rpc(self, request_generator): """Invoke the Pub/Sub streaming pull RPC. @@ -120,7 +120,7 @@ def modify_ack_deadline(self, ack_id, seconds): modify_deadline_ack_ids=[ack_id], modify_deadline_seconds=[seconds], ) - self._bidi_stream.send_request(request) + self._consumer.send_request(request) def nack(self, ack_id): """Explicitly deny receipt of a message.""" diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/consumer/mp.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/mp.py similarity index 62% rename from pubsub/google/cloud/pubsub_v1/subscriber/consumer/mp.py rename to pubsub/google/cloud/pubsub_v1/subscriber/policy/mp.py index 1be7b519d1f3..356c06fe6d94 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/consumer/mp.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/mp.py @@ -17,20 +17,21 @@ from concurrent import futures import multiprocessing -from google.gax.errors import GaxError - -from google.cloud.pubsub_v1 import types -from google.cloud.pubsub_v1.subscriber import exceptions +from google.cloud.pubsub_v1.subscriber import helper_threads from google.cloud.pubsub_v1.subscriber.consumer import base +from google.cloud.pubsub_v1.subscriber.message import Message -class Consumer(base.BaseConsumer): +class Policy(base.BasePolicy): """A consumer class based on 
:class:``multiprocessing.Process``. This consumer handles the connection to the Pub/Sub service and all of the concurrency needs. """ def __init__(self, client, subscription): + # Default the callback to a no-op; it is provided by `.open`. + self._callback = lambda message: None + # Create a manager for keeping track of shared state. self._manager = multiprocessing.Manager() self._shared = self._manager.Namespace() @@ -39,12 +40,16 @@ def __init__(self, client, subscription): self._shared.request_queue = self._manager.Queue() # Call the superclass constructor. - super(Consumer, self).__init__(client, subscription, + super(Policy, self).__init__(client, subscription, histogram_data=self._shared.histogram_data, ) # Also maintain a request queue and an executor. self._executor = futures.ProcessPoolExecutor() + self._callback_requests = helper_threads.QueueCallbackThread( + self._shared.request_queue, + self._on_callback_request, + ) # Keep track of the GRPC connection. self._process = None @@ -60,8 +65,7 @@ def subscription(self): def close(self): """Close the existing connection.""" - self._process.terminate() - self._process = None + self._consumer.helper_threads.stop('callback requests worker') def open(self, callback): """Open a streaming pull connection and begin receiving messages. @@ -73,27 +77,22 @@ def open(self, callback): Args: callback (function): The callback function. """ - # Sanity check: If the connection is already open, fail. - if self._process is not None: - raise exceptions.AlreadyOpen(self._subscription) - - # Open the request. - self._process = multiprocessing.Process(self.stream) - self._process.daemon = True - self._process.start() - - def stream(self): - """Stream data to and from the Cloud Pub/Sub service.""" - - # The streaming connection expects a series of StreamingPullRequest - # objects. The first one must specify the subscription and the - # ack deadline; prepend this to the list. 
- self._shared.outgoing_requests.insert(0, types.StreamingPullRequest( - stream_ack_deadline_seconds=self.ack_deadline, - subscription=self._subscription, - )) - - try: - outgoing = iter(self._shared.outgoing_requests) - except GaxError: - return self.stream() + self._callback = callback + self._consumer.helper_threads.start('callback requests worker', + self._shared.request_queue, + self._callback_requests, + ) + + def on_callback_request(self, callback_request): + """Map the callback request to the appropriate GRPC request.""" + action, args = callback_request[0], callback_request[1:] + getattr(self, action)(*args) + + def on_response(self, response): + """Process all received Pub/Sub messages. + + For each message, schedule a callback with the executor. + """ + for msg in response.received_messages: + message = Message(self, msg.ack_id, msg.message) + self._executor.submit(self._callback, message) From 6a7e846aad99af762bb5a7c14b65e3fc3a17ec09 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Wed, 28 Jun 2017 08:57:56 -0700 Subject: [PATCH 11/63] Implement lease management. --- .../cloud/pubsub_v1/subscriber/consumer.py | 8 +- .../cloud/pubsub_v1/subscriber/message.py | 31 ++++++- .../cloud/pubsub_v1/subscriber/policy/base.py | 89 ++++++++++++++++++- .../cloud/pubsub_v1/subscriber/policy/mp.py | 23 ++++- 4 files changed, 138 insertions(+), 13 deletions(-) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py b/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py index eb74e7300a5d..8e22396e548b 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py @@ -168,7 +168,6 @@ class Consumer(object): low. The Consumer and end-user can configure any sort of executor they want for the actual processing of the responses, which may be CPU intensive. 
""" - def __init__(self, policy): """ Args: @@ -243,18 +242,13 @@ def _blocking_consume(self): except Exception as e: self._policy.on_exception(e) - def _consume_thread(self): - """Thread to consume the stream.""" - self._blocking_consume() - def start_consuming(self): """Start consuming the stream.""" self._exiting.clear() self.helper_threads.start('consume bidirectional stream', self._request_queue, - self._consume_thread, + self._blocking_consume, ) - self._policy.initialize(self) def stop_consuming(self): """Signal the stream to stop and block until it completes.""" diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/message.py b/pubsub/google/cloud/pubsub_v1/subscriber/message.py index 92c32da75edb..5690fd1c6d0d 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/message.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/message.py @@ -68,6 +68,10 @@ def __init__(self, policy, ack_id, message): # the default lease deadline. self._received_timestamp = time.time() + # The policy should lease this message, telling PubSub that it has + # it until it is acked or otherwise dropped. + self.lease() + def __repr__(self): # Get an abbreviated version of the data. abbv_data = self._data @@ -118,7 +122,6 @@ def ack(self): you receive the message again. .. warning:: - Acks in Pub/Sub are best effort. You should always ensure that your processing code is idempotent, as you may receive any given message more than once. @@ -126,6 +129,31 @@ def ack(self): time_to_ack = math.ceil(time.time() - self._received_timestamp) self._policy.histogram.add(time_to_ack) self._policy.ack(self._ack_id) + self.release() + + def drop(self): + """Release the message from lease management. + + This informs the policy to no longer hold on to the lease for this + message. Pub/Sub will re-deliver the message if it is not acknowledged + before the existing lease expires. + + .. 
warning:: + For most use cases, the only reason to drop a message from + lease management is on :meth:`ack` or :meth:`nack`; these methods + both call this one. You probably do not want to call this method + directly. + """ + self._policy.drop(self._ack_id) + + def lease(self): + """Inform the policy to lease this message continually. + + .. note:: + This method is called by the constructor, and you should never + need to call it manually. + """ + self._policy.lease(self._ack_id) def modify_ack_deadline(self, seconds): """Set the deadline for acknowledgement to the given value. @@ -153,3 +181,4 @@ def nack(self): This will cause the message to be re-delivered to the subscription. """ self.modify_ack_deadline(seconds=0) + self.release() diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py index 88650e59b845..2dd68c8b6f3f 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py @@ -15,6 +15,8 @@ from __future__ import absolute_import import abc +import random +import time import six @@ -90,6 +92,17 @@ def initial_request(self): subscription=self.subscription, ) + @property + def managed_ack_ids(self): + """Return the ack IDs currently being managed by the policy. + + Returns: + set: The set of ack IDs being managed. + """ + if not hasattr(self, '_managed_ack_ids'): + self._managed_ack_ids = set() + return self._managed_ack_ids + @property def subscription(self): """Return the subscription. @@ -100,7 +113,11 @@ def subscription(self): return self._subscription def ack(self, ack_id): - """Acknowledge the message corresponding to the given ack_id.""" + """Acknowledge the message corresponding to the given ack_id. + + Args: + ack_id (str): The ack ID. 
+ """ request = types.StreamingPullRequest(ack_ids=[ack_id]) self._consumer.send_request(request) @@ -114,8 +131,70 @@ def call_rpc(self, request_generator): """ return self._client.api.streaming_pull(request_generator) + def drop(self, ack_id): + """Remove the given ack ID from lease management. + + Args: + ack_id (str): The ack ID. + """ + self.managed_ack_ids.remove(ack_id) + + def lease(self, ack_id): + """Add the given ack ID to lease management. + + Args: + ack_id (str): The ack ID. + """ + self.managed_ack_ids.add(ack_id) + + def maintain_leases(self): + """Maintain all of the leases being managed by the policy. + + This method modifies the ack deadline for all of the managed + ack IDs, then waits for most of that time (but with jitter), and + then calls itself. + + .. warning:: + This method blocks, and generally should be run in a separate + thread or process. + + Additionally, you should not have to call this method yourself, + unless you are implementing your own policy. If you are + implementing your own policy, you _should_ call this method + in an appropriate form of subprocess. + """ + # Determine the appropriate duration for the lease. + # This is based off of how long previous messages have taken to ack, + # with a sensible default and within the ranges allowed by Pub/Sub. + p99 = self.histogram.percentile(99) + + # Create a streaming pull request. + # We do not actually call `modify_ack_deadline` over and over because + # it is more efficient to make a single request. + ack_ids = list(self.managed_ack_ids) + if len(ack_ids) > 0: + request = types.StreamingPullRequest( + modify_deadline_ack_ids=ack_ids, + modify_deadline_seconds=[p99] * len(ack_ids), + ) + self._consumer.send_request(request) + + # Now wait an appropriate period of time and do this again. + # + # We determine the appropriate period of time based on a random + # period between 0 seconds and 90% of the lease. 
This use of + # jitter (http://bit.ly/2s2ekL7) helps decrease contention in cases + # where there are many clients. + time.sleep(random.uniform(0.0, p99 * 0.9)) + self.maintain_managed_leases() + def modify_ack_deadline(self, ack_id, seconds): - """Modify the ack deadline for the given ack_id.""" + """Modify the ack deadline for the given ack_id. + + Args: + ack_id (str): The ack ID + seconds (int): The number of seconds to set the new deadline to. + """ request = types.StreamingPullRequest( modify_deadline_ack_ids=[ack_id], modify_deadline_seconds=[seconds], @@ -123,7 +202,11 @@ def modify_ack_deadline(self, ack_id, seconds): self._consumer.send_request(request) def nack(self, ack_id): - """Explicitly deny receipt of a message.""" + """Explicitly deny receipt of a message. + + Args: + ack_id (str): The ack ID. + """ return self.modify_ack_deadline(ack_id, 0) @abc.abstractmethod diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/mp.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/mp.py index 356c06fe6d94..c2c25bc830fa 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/mp.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/mp.py @@ -36,6 +36,7 @@ def __init__(self, client, subscription): self._manager = multiprocessing.Manager() self._shared = self._manager.Namespace() self._shared.subscription = subscription + self._shared.managed_ack_ids = self._manager.set() self._shared.histogram_data = self._manager.dict() self._shared.request_queue = self._manager.Queue() @@ -51,8 +52,19 @@ def __init__(self, client, subscription): self._on_callback_request, ) - # Keep track of the GRPC connection. - self._process = None + # Spawn a process that maintains all of the leases for this policy. + self._lease_process = multiprocessing.Process(self.maintain_leases) + self._lease_process.daemon = True + self._lease_process.start() + + @property + def managed_ack_ids(self): + """Return the ack IDs currently being managed by the policy. 
+ + Returns: + set: The set of ack IDs being managed. + """ + return self._shared.managed_ack_ids @property def subscription(self): @@ -88,6 +100,13 @@ def on_callback_request(self, callback_request): action, args = callback_request[0], callback_request[1:] getattr(self, action)(*args) + def on_exception(self, exception): + """Bubble the exception. + + This will cause the stream to exit loudly. + """ + raise exception + def on_response(self, response): """Process all received Pub/Sub messages. From 3bb130bd5b7493c764afbda5d5b1e4957ffb1b9e Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Thu, 29 Jun 2017 11:04:09 -0700 Subject: [PATCH 12/63] WIP --- .../cloud/pubsub_v1/subscriber/client.py | 14 +++---- .../cloud/pubsub_v1/subscriber/consumer.py | 13 +++--- .../cloud/pubsub_v1/subscriber/policy/base.py | 2 +- .../cloud/pubsub_v1/subscriber/policy/mp.py | 42 +++++++++++++------ 4 files changed, 42 insertions(+), 29 deletions(-) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/client.py b/pubsub/google/cloud/pubsub_v1/subscriber/client.py index b2def0bb6409..410d6ea130fc 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/client.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/client.py @@ -19,7 +19,7 @@ from google.cloud.gapic.pubsub.v1 import subscriber_client from google.cloud.pubsub_v1 import _gapic -from google.cloud.pubsub_v1.subscriber.consumer import mp +from google.cloud.pubsub_v1.subscriber.policy import mp __VERSION__ = pkg_resources.get_distribution('google-cloud-pubsub').version @@ -38,19 +38,19 @@ class SubscriberClient(object): flow_control (~.pubsub_v1.types.FlowControl): The flow control settings. Use this to prevent situations where you are inundated with too many messages at once. - consumer_class (class): A class that describes how to handle + policy_class (class): A class that describes how to handle subscriptions. 
You may subclass the - :class:`.pubsub_v1.subscriber.consumer.base.BaseConsumer` + :class:`.pubsub_v1.subscriber.policy.base.BasePolicy` class in order to define your own consumer. This is primarily provided to allow use of different concurrency models; the default is based on :class:`multiprocessing.Process`. - **kwargs (dict): Any additional arguments provided are sent as keyword + kwargs (dict): Any additional arguments provided are sent as keyword keyword arguments to the underlying :class:`~.gapic.pubsub.v1.subscriber_client.SubscriberClient`. Generally, you should not need to set additional keyword arguments. """ - def __init__(self, flow_control=(), consumer_class=mp.Consumer, + def __init__(self, flow_control=(), policy_class=mp.Policy, **kwargs): # Add the metrics headers, and instantiate the underlying GAPIC # client. @@ -60,7 +60,7 @@ def __init__(self, flow_control=(), consumer_class=mp.Consumer, # The subcription class is responsible to retrieving and dispatching # messages. - self._consumer_class = consumer_class + self._policy_class = policy_class def subscribe(self, subscription, callback=None): """Return a representation of an individual subscription. @@ -94,7 +94,7 @@ def subscribe(self, subscription, callback=None): ~.pubsub_v1.subscriber.consumer.base.BaseConsumer: An instance of the defined ``consumer_class`` on the client. """ - subscr = self._consumer_class(self, subscription) + subscr = self._policy_class(self, subscription) if callable(callback): subscr.open(callback) return subscr diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py b/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py index 8e22396e548b..2478196bd04f 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py @@ -197,15 +197,12 @@ def _request_generator_thread(self): This blocks for new requests on the request queue and yields them to gRPC. """ - # Note: gRPC will run this in a separate thread. 
This can and must - # block to keep the stream open. - initial_request = self._policy.on_initial_request() - if initial_request is not None: - _LOGGER.debug( - 'Sending initial request: {}'.format(initial_request), - ) - yield initial_request + # First, yield the initial request. This occurs on every new + # connection, fundamentally including a resumed connection. + yield self._policy.initial_request + # Now yield each of the items on the request queue, and block if there + # are none. This can and must block to keep the stream open. while True: request = self._request_queue.get() if request == helper_threads.STOP: diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py index 2dd68c8b6f3f..3ddc072f2925 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py @@ -186,7 +186,7 @@ def maintain_leases(self): # jitter (http://bit.ly/2s2ekL7) helps decrease contention in cases # where there are many clients. time.sleep(random.uniform(0.0, p99 * 0.9)) - self.maintain_managed_leases() + self.maintain_leases() def modify_ack_deadline(self, ack_id, seconds): """Modify the ack deadline for the given ack_id. 
diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/mp.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/mp.py index c2c25bc830fa..896914c46305 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/mp.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/mp.py @@ -15,13 +15,25 @@ from __future__ import absolute_import from concurrent import futures +from multiprocessing import managers +import logging import multiprocessing from google.cloud.pubsub_v1.subscriber import helper_threads -from google.cloud.pubsub_v1.subscriber.consumer import base +from google.cloud.pubsub_v1.subscriber.policy import base from google.cloud.pubsub_v1.subscriber.message import Message +logger = logging.getLogger(__name__) + + +# Allow sets to be able to be run through the managers; ensure they are +# iterable and have add/remove. +managers.SyncManager.register('set', set, + exposed=('__contains__', '__iter__', 'add', 'remove'), +) + + class Policy(base.BasePolicy): """A consumer class based on :class:``multiprocessing.Process``. @@ -34,26 +46,28 @@ def __init__(self, client, subscription): # Create a manager for keeping track of shared state. self._manager = multiprocessing.Manager() - self._shared = self._manager.Namespace() - self._shared.subscription = subscription - self._shared.managed_ack_ids = self._manager.set() - self._shared.histogram_data = self._manager.dict() - self._shared.request_queue = self._manager.Queue() + self._shared = self._manager.Namespace(subscription=subscription) + self._managed_ack_ids = self._manager.set() + self._request_queue = self._manager.Queue() # Call the superclass constructor. super(Policy, self).__init__(client, subscription, - histogram_data=self._shared.histogram_data, + histogram_data=self._manager.dict(), ) # Also maintain a request queue and an executor. 
+ logger.debug('Creating callback requests thread (not starting).') self._executor = futures.ProcessPoolExecutor() self._callback_requests = helper_threads.QueueCallbackThread( - self._shared.request_queue, - self._on_callback_request, + self._request_queue, + self.on_callback_request, ) # Spawn a process that maintains all of the leases for this policy. - self._lease_process = multiprocessing.Process(self.maintain_leases) + logger.debug('Spawning lease process.') + self._lease_process = multiprocessing.Process( + target=self.maintain_leases, + ) self._lease_process.daemon = True self._lease_process.start() @@ -64,7 +78,7 @@ def managed_ack_ids(self): Returns: set: The set of ack IDs being managed. """ - return self._shared.managed_ack_ids + return self._managed_ack_ids @property def subscription(self): @@ -87,13 +101,15 @@ def open(self, callback): argument. Args: - callback (function): The callback function. + callback (Callable): The callback function. """ + logger.debug('Starting callback requests worker.') self._callback = callback self._consumer.helper_threads.start('callback requests worker', - self._shared.request_queue, + self._request_queue, self._callback_requests, ) + self._consumer.start_consuming() def on_callback_request(self, callback_request): """Map the callback request to the appropriate GRPC request.""" From f97dc237d5ae79ec184b87024c197891f19527ea Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Thu, 29 Jun 2017 12:21:40 -0700 Subject: [PATCH 13/63] WIP --- pubsub/google/cloud/pubsub_v1/subscriber/consumer.py | 1 + pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py b/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py index 2478196bd04f..5cc583c7e5ec 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py @@ -227,6 +227,7 @@ def 
_blocking_consume(self): response_generator = self._policy.call_rpc(request_generator) try: for response in response_generator: + print(response) self._policy.on_response(response) # If the loop above exits without an exception, then the diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py index 3ddc072f2925..caa8c3580313 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py @@ -20,6 +20,8 @@ import six +from google import gax + from google.cloud.pubsub_v1 import types from google.cloud.pubsub_v1.subscriber import consumer from google.cloud.pubsub_v1.subscriber import histogram @@ -129,7 +131,9 @@ def call_rpc(self, request_generator): and blocks if there are no outstanding requests (until such time as there are). """ - return self._client.api.streaming_pull(request_generator) + return self._client.api.streaming_pull(request_generator, + options=gax.CallOptions(timeout=600), + ) def drop(self, ack_id): """Remove the given ack ID from lease management. 
From 303436c430864364f571a05acc698a6b1f57d2b3 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Wed, 5 Jul 2017 08:18:18 -0700 Subject: [PATCH 14/63] WIP --- pubsub/google/cloud/pubsub_v1/subscriber/consumer.py | 4 ++-- pubsub/google/cloud/pubsub_v1/subscriber/policy/mp.py | 9 +++++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py b/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py index 5cc583c7e5ec..2026b30f3f2c 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py @@ -227,7 +227,6 @@ def _blocking_consume(self): response_generator = self._policy.call_rpc(request_generator) try: for response in response_generator: - print(response) self._policy.on_response(response) # If the loop above exits without an exception, then the @@ -236,7 +235,8 @@ def _blocking_consume(self): # case, break out of the while loop and exit this thread. _LOGGER.debug('Clean RPC loop exit signalled consumer exit.') break - + except KeyboardInterrupt: + self.stop_consuming() except Exception as e: self._policy.on_exception(e) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/mp.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/mp.py index 896914c46305..76129707cdd1 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/mp.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/mp.py @@ -19,6 +19,8 @@ import logging import multiprocessing +import grpc + from google.cloud.pubsub_v1.subscriber import helper_threads from google.cloud.pubsub_v1.subscriber.policy import base from google.cloud.pubsub_v1.subscriber.message import Message @@ -121,6 +123,13 @@ def on_exception(self, exception): This will cause the stream to exit loudly. """ + # If this is DEADLINE_EXCEEDED, then we want to retry. + # That entails just returning None. 
+ deadline_exceeded = grpc.StatusCode.DEADLINE_EXCEEDED + if getattr(exception, 'code', lambda: None)() == deadline_exceeded: + return + + # Raise any other exception. raise exception def on_response(self, response): From 933d2f3589195887dbf735bbd14d2f1a5eaf5e91 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Thu, 13 Jul 2017 11:17:42 -0700 Subject: [PATCH 15/63] WIP --- .../cloud/pubsub_v1/publisher/batch/base.py | 122 +++++++++--- .../publisher/batch/{mp.py => thread.py} | 182 ++++++++++-------- .../cloud/pubsub_v1/publisher/client.py | 31 ++- .../cloud/pubsub_v1/subscriber/client.py | 6 +- .../cloud/pubsub_v1/subscriber/consumer.py | 5 +- .../cloud/pubsub_v1/subscriber/histogram.py | 2 +- .../cloud/pubsub_v1/subscriber/message.py | 4 +- .../cloud/pubsub_v1/subscriber/policy/base.py | 15 +- .../subscriber/policy/{mp.py => thread.py} | 47 +---- 9 files changed, 235 insertions(+), 179 deletions(-) rename pubsub/google/cloud/pubsub_v1/publisher/batch/{mp.py => thread.py} (69%) rename pubsub/google/cloud/pubsub_v1/subscriber/policy/{mp.py => thread.py} (75%) diff --git a/pubsub/google/cloud/pubsub_v1/publisher/batch/base.py b/pubsub/google/cloud/pubsub_v1/publisher/batch/base.py index d0e9e3885d2a..263c18e56a80 100644 --- a/pubsub/google/cloud/pubsub_v1/publisher/batch/base.py +++ b/pubsub/google/cloud/pubsub_v1/publisher/batch/base.py @@ -24,8 +24,8 @@ class BaseBatch(object): """The base batching class for Pub/Sub publishing. - Although the :class:`~.pubsub_v1.publisher.batch.mp.Batch` class, based - on :class:`multiprocessing.Process`, is fine for most cases, advanced + Although the :class:`~.pubsub_v1.publisher.batch.thread.Batch` class, based + on :class:`threading.Thread`, is fine for most cases, advanced users may need to implement something based on a different concurrency model. @@ -33,6 +33,10 @@ class BaseBatch(object): subclasses may be passed as the ``batch_class`` argument to :class:`~.pubsub_v1.client.PublisherClient`. 
""" + def __len__(self): + """Return the number of messages currently in the batch.""" + return len(self.messages) + @property @abc.abstractmethod def client(self): @@ -44,47 +48,95 @@ def client(self): raise NotImplementedError @property + @abc.abstractmethod + def client(self): + """Return the client used to create this batch. + + Returns: + ~.pubsub_v1.client.PublisherClient: A publisher client. + """ + raise NotImplementedError + + @property + @abc.abstractmethod + def messages(self): + """Return the messages currently in the batch. + + Returns: + Sequence: The messages currently in the batch. + """ + raise NotImplementedError + + @property + @abc.abstractmethod + def size(self): + """Return the total size of all of the messages currently in the batch. + + Returns: + int: The total size of all of the messages currently + in the batch, in bytes. + """ + raise NotImplementedError + + @property + @abc.abstractmethod + def settings(self): + """Return the batch settings. + + Returns: + ~.pubsub_v1.types.BatchSettings: The batch settings. These are + considered immutable once the batch has been opened. + """ + raise NotImplementedError + + @property + @abc.abstractmethod def status(self): """Return the status of this batch. Returns: str: The status of this batch. All statuses are human-readable, - all-lowercase strings, and represented in the - :class:`BaseBatch.Status` enum. + all-lowercase strings. The ones represented in the + :class:`BaseBatch.Status` enum are special, but other statuses + are permitted. """ raise NotImplementedError - def publish(self, data, **attrs): - """Publish a single message. + def will_accept(self, message): + """Return True if the batch is able to accept the message. + + Args: + message (~.pubsub_v1.types.PubsubMessage): The Pub/Sub message. + + Returns: + bool: Whether this batch can accept the message. + """ + # If this batch is not accepting messages generally, return False. 
+        if self.status != self.Status.ACCEPTING_MESSAGES:
+            return False
-
-        .. note::
-            Messages in Pub/Sub are blobs of bytes. They are *binary* data,
-            not text. You must send data as a bytestring
-            (``bytes`` in Python 3; ``str`` in Python 2), and this library
-            will raise an exception if you send a text string.
+        # If this batch cannot hold the message in question, return False.
+        if self.size + message.ByteSize() > self.settings.max_bytes:
+            return False
-
-            The reason that this is so important (and why we do not try to
-            coerce for you) is because Pub/Sub is also platform independent
-            and there is no way to know how to decode messages properly on
-            the other side; therefore, encoding and decoding is a required
-            exercise for the developer.
+        # Okay, everything is good.
+        return True
+
+    @abc.abstractmethod
+    def publish(self, message):
+        """Publish a single message.
 
         Add the given message to this object; this will cause it to be
         published once the batch either has enough messages or a sufficient
         period of time has elapsed.
 
-        Args:
-            data (bytes): A bytestring representing the message body. This
-                must be a bytestring (a text string will raise TypeError).
-            attrs (Mapping[str, str]): A dictionary of attributes to be
-                sent as metadata. (These may be text strings or byte strings.)
+        This method is called by :meth:`~.PublisherClient.publish`.
 
-        Raises:
-            TypeError: If the ``data`` sent is not a bytestring, or if the
-                ``attrs`` are not either a ``str`` or ``bytes``.
+        Args:
+            message (~.pubsub_v1.types.PubsubMessage): The Pub/Sub message.
 
         Returns:
-            ~.pubsub_v1.publisher.future.Future: An object conforming to the
+            ~.pubsub_v1.publisher.batch.thread.Future: An object conforming to the
             :class:`concurrent.futures.Future` interface.
         """
        raise NotImplementedError
@@ -101,7 +153,19 @@ class Status(object):
     SUCCESS = 'success'
 
 
-# Make a fake batch. This is used by the client to do single-op checks
-# for batch existence.
-FakeBatch = collections.namedtuple('FakeBatch', ['status']) -FAKE = FakeBatch(status='fake') +class RejectionBatch(object): + """A fake batch-like object that refuses to accept any message. + + This is used by the client to do single-op checks for batch + existence. + """ + def will_accept(self, message): + """Return False. + + Args: + message (~.pubsub_v1.types.PubsubMessage): The Pub/Sub message. + + Returns: + bool: Whether this batch can accept the message. It never can. + """ + return False diff --git a/pubsub/google/cloud/pubsub_v1/publisher/batch/mp.py b/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py similarity index 69% rename from pubsub/google/cloud/pubsub_v1/publisher/batch/mp.py rename to pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py index de6b9005c79b..95418dc9cada 100644 --- a/pubsub/google/cloud/pubsub_v1/publisher/batch/mp.py +++ b/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py @@ -14,13 +14,10 @@ from __future__ import absolute_import -import copy -import multiprocessing +import threading import time import uuid -import six - from google.cloud.pubsub_v1 import types from google.cloud.pubsub_v1.publisher import exceptions from google.cloud.pubsub_v1.publisher.batch import base @@ -53,7 +50,7 @@ class Batch(base.BaseBatch): create this batch. topic (str): The topic. The format for this is ``projects/{project}/topics/{topic}``. - settings (~.pubsub_v1.types.Batching): The settings for batch + settings (~.pubsub_v1.types.BatchSettings): The settings for batch publishing. These should be considered immutable once the batch has been opened. autocommit (bool): Whether to autocommit the batch when the time @@ -62,27 +59,26 @@ class Batch(base.BaseBatch): """ def __init__(self, client, topic, settings, autocommit=True): self._client = client - self._manager = multiprocessing.Manager() # Create a namespace that is owned by the client manager; this # is necessary to be able to have these values be communicable between # processes. 
- self._shared = self.manager.Namespace() - self._shared.futures = self.manager.list() - self._shared.messages = self.manager.list() - self._shared.message_ids = self.manager.dict() - self._shared.settings = settings - self._shared.status = self.Status.ACCEPTING_MESSAGES - self._shared.topic = topic + self._futures = [] + self._messages = [] + self._size = 0 + self._message_ids = {} + self._settings = settings + self._status = self.Status.ACCEPTING_MESSAGES + self._topic = topic # This is purely internal tracking. - self._process = None + self._thread = None # Continually monitor the thread until it is time to commit the # batch, or the batch is explicitly committed. - if autocommit and self._shared.settings.max_latency < float('inf'): - self._process = multiprocessing.Process(target=self.monitor) - self._process.start() + if autocommit and self._settings.max_latency < float('inf'): + self._thread = threading.Thread(target=self.monitor) + self._thread.start() @property def client(self): @@ -94,14 +90,33 @@ def client(self): return self._client @property - def manager(self): - """Return the client's manager. + def messages(self): + """Return the messages currently in the batch. + + Returns: + Sequence: The messages currently in the batch. + """ + return self._messages + + @property + def settings(self): + """Return the batch settings. + + Returns: + ~.pubsub_v1.types.BatchSettings: The batch settings. These are + considered immutable once the batch has been opened. + """ + return self._settings + + @property + def size(self): + """Return the total size of all of the messages currently in the batch. Returns: - :class:`multiprocessing.Manager`: The manager responsible for - handling shared memory objects. + int: The total size of all of the messages currently + in the batch, in bytes. """ - return self._manager + return self._size @property def status(self): @@ -111,24 +126,51 @@ def status(self): str: The status of this batch. 
All statuses are human-readable, all-lowercase strings. """ - return self._shared.status + return self._status def commit(self): """Actually publish all of the messages on the active batch. + This synchronously sets the batch status to in-flight, and then opens + a new thread, which handles actually sending the messages to Pub/Sub. + + .. note:: + + This method is non-blocking. It opens a new thread, which calls + :meth:`_commit`, which does block. + """ + # Set the status to in-flight synchronously, to ensure that + # this batch will necessarily not accept new messages. + # + # Yes, this is repeated in `_commit`, because that method is called + # directly by `monitor`. + self._status = 'in-flight' + + # Start a new thread to actually handle the commit. + commit_thread = threading.Thread(target=self._commit) + commit_thread.start() + + def _commit(self): + """Actually publish all of the messages on the active batch. + This moves the batch out from being the active batch to an in-flight batch on the publisher, and then the batch is discarded upon completion. + + .. note:: + + This method blocks. The :meth:`commit` method is the non-blocking + version, which calls this one. """ # Update the status. - self._shared.status = 'in-flight' + self._status = 'in-flight' # Begin the request to publish these messages. - if len(self._shared.messages) == 0: + if len(self._messages) == 0: raise Exception('Empty queue') response = self._client.api.publish( - self._shared.topic, - self._shared.messages, + self._topic, + self.messages, ) # FIXME (lukesneeringer): Check for failures; retry. @@ -138,7 +180,7 @@ def commit(self): # Sanity check: If the number of message IDs is not equal to the # number of futures I have, then something went wrong. 
- if len(response.message_ids) != len(self._shared.futures): + if len(response.message_ids) != len(self._futures): raise exceptions.PublishError( 'Some messages were not successfully published.', ) @@ -146,9 +188,9 @@ def commit(self): # Iterate over the futures on the queue and return the response IDs. # We are trusting that there is a 1:1 mapping, and raise an exception # if not. - self._shared.status = self.Status.SUCCESS - for message_id, fut in zip(response.message_ids, self._shared.futures): - self._shared.message_ids[hash(fut)] = message_id + self._status = self.Status.SUCCESS + for message_id, fut in zip(response.message_ids, self._futures): + self._message_ids[hash(fut)] = message_id fut._trigger() def monitor(self): @@ -161,74 +203,46 @@ def monitor(self): # in a separate thread. # # Sleep for however long we should be waiting. - time.sleep(self._shared.settings.max_latency) + time.sleep(self._settings.max_latency) # If, in the intervening period, the batch started to be committed, # then no-op at this point. - if self._shared.status != self.Status.ACCEPTING_MESSAGES: + if self._status != self.Status.ACCEPTING_MESSAGES: return # Commit. - return self.commit() + return self._commit() - def publish(self, data, **attrs): + def publish(self, message): """Publish a single message. - .. note:: - Messages in Pub/Sub are blobs of bytes. They are *binary* data, - not text. You must send data as a bytestring - (``bytes`` in Python 3; ``str`` in Python 2), and this library - will raise an exception if you send a text string. - - The reason that this is so important (and why we do not try to - coerce for you) is because Pub/Sub is also platform independent - and there is no way to know how to decode messages properly on - the other side; therefore, encoding and decoding is a required - exercise for the developer. 
- Add the given message to this object; this will cause it to be published once the batch either has enough messages or a sufficient period of time has elapsed. - Args: - data (bytes): A bytestring representing the message body. This - must be a bytestring (a text string will raise TypeError). - attrs (Mapping[str, str]): A dictionary of attributes to be - sent as metadata. (These may be text strings or byte strings.) + This method is called by :meth:`~.PublisherClient.publish`. - Raises: - TypeError: If the ``data`` sent is not a bytestring, or if the - ``attrs`` are not either a ``str`` or ``bytes``. + Args: + message (~.pubsub_v1.types.PubsubMessage): The Pub/Sub message. Returns: ~.pubsub_v1.publisher.batch.mp.Future: An object conforming to the :class:`concurrent.futures.Future` interface. """ - # Sanity check: Is the data being sent as a bytestring? - # If it is literally anything else, complain loudly about it. - if not isinstance(data, six.binary_type): - raise TypeError('Data being published to Pub/Sub must be sent ' - 'as a bytestring.') - - # Coerce all attributes to text strings. - for k, v in copy.copy(attrs).items(): - if isinstance(data, six.text_type): - continue - if isinstance(data, six.binary_type): - attrs[k] = v.decode('utf-8') - continue - raise TypeError('All attributes being published to Pub/Sub must ' - 'be sent as text strings.') + # Coerce the type, just in case. + message = types.PubsubMessage(message) + + # Add the size to the running total of the size, so we know + # if future messages need to be rejected. + self._size += message.ByteSize() # Store the actual message in the batch's message queue. - self._shared.messages.append( - types.PubsubMessage(data=data, attributes=attrs), - ) + self._messages.append(message) # Return a Future. That future needs to be aware of the status # of this batch. 
- f = Future(self._shared) - self._shared.futures.append(f) + f = Future(self) + self._futures.append(f) return f @@ -242,11 +256,11 @@ class Future(object): methods in this library. Args: - batch (:class:`multiprocessing.Namespace`): Information about the - batch object that is committing this message. + batch (:class:`~.Batch`): The batch object that is committing + this message. """ - def __init__(self, batch_info): - self._batch_info = batch_info + def __init__(self, batch): + self._batch = batch self._callbacks = [] self._hash = hash(uuid.uuid4()) @@ -280,7 +294,7 @@ def done(self): This still returns True in failure cases; checking `result` or `exception` is the canonical way to assess success or failure. """ - return self._batch_info.status in ('success', 'error') + return self._batch.status in ('success', 'error') def result(self, timeout=None): """Return the message ID, or raise an exception. @@ -302,7 +316,7 @@ def result(self, timeout=None): # return an appropriate value. err = self.exception(timeout=timeout) if err is None: - return self._batch_info.message_ids[hash(self)] + return self._batch.message_ids[hash(self)] raise err def exception(self, timeout=None, _wait=1): @@ -322,12 +336,12 @@ def exception(self, timeout=None, _wait=1): :class:`Exception`: The exception raised by the call, if any. """ # If the batch completed successfully, this should return None. - if self._batch_info.status == 'success': + if self._batch.status == 'success': return None # If this batch had an error, this should return it. - if self._batch_info.status == 'error': - return self._batch_info.error + if self._batch.status == 'error': + return self._batch.error # If the timeout has been exceeded, raise TimeoutError. 
if timeout and timeout < 0: diff --git a/pubsub/google/cloud/pubsub_v1/publisher/client.py b/pubsub/google/cloud/pubsub_v1/publisher/client.py index 4941326c02a5..5ebbceec84e6 100644 --- a/pubsub/google/cloud/pubsub_v1/publisher/client.py +++ b/pubsub/google/cloud/pubsub_v1/publisher/client.py @@ -15,7 +15,6 @@ from __future__ import absolute_import import copy -import functools import pkg_resources import six @@ -25,7 +24,7 @@ from google.cloud.pubsub_v1 import _gapic from google.cloud.pubsub_v1 import types from google.cloud.pubsub_v1.publisher.batch import base -from google.cloud.pubsub_v1.publisher.batch import mp +from google.cloud.pubsub_v1.publisher.batch import thread __VERSION__ = pkg_resources.get_distribution('google-cloud-pubsub').version @@ -47,13 +46,13 @@ class PublisherClient(object): :class:`.pubsub_v1.publisher.batch.base.BaseBatch` class in order to define your own batcher. This is primarily provided to allow use of different concurrency models; the default - is based on :class:`multiprocessing.Process`. + is based on :class:`threading.Thread`. kwargs (dict): Any additional arguments provided are sent as keyword arguments to the underlying :class:`~.gapic.pubsub.v1.publisher_client.PublisherClient`. Generally, you should not need to set additional keyword arguments. """ - def __init__(self, batch_settings=(), batch_class=mp.Batch, **kwargs): + def __init__(self, batch_settings=(), batch_class=thread.Batch, **kwargs): # Add the metrics headers, and instantiate the underlying GAPIC # client. kwargs['lib_name'] = 'gccl' @@ -66,23 +65,19 @@ def __init__(self, batch_settings=(), batch_class=mp.Batch, **kwargs): self._batch_class = batch_class self._batches = {} - @property - def concurrency(self): - """Return the concurrency strategy instance. + # Instantiate the "rejection batch", which is used for single-op + # acceptance checks if no batch is present. 
+ self._rejection = base.RejectionBatch() - Returns: - ~.pubsub_v1.concurrency.base.PublishStrategy: The class responsible - for handling publishing concurrency. - """ - return self._concurrency - - def batch(self, topic, create=True, autocommit=True): + def batch(self, topic, message, create=True, autocommit=True): """Return the current batch. This will create a new batch only if no batch currently exists. Args: topic (str): A string representing the topic. + message (~.pubsub_v1.types.PubsubMessage): The message that will + be committed. create (bool): Whether to create a new batch if no batch is found. Defaults to True. autocommit (bool): Whether to autocommit this batch. @@ -93,8 +88,7 @@ def batch(self, topic, create=True, autocommit=True): """ # If there is no matching batch yet, then potentially create one # and place it on the batches dictionary. - accepting = base.BaseBatch.Status.ACCEPTING_MESSAGES - if self._batches.get(topic, base.FAKE).status != accepting: + if not self._batches.get(topic, self._rejection).will_accept(message): if not create: return None self._batches[topic] = self._batch_class( @@ -161,5 +155,8 @@ def publish(self, topic, data, **attrs): raise TypeError('All attributes being published to Pub/Sub must ' 'be sent as text strings.') + # Create the Pub/Sub message object. + message = types.PubsubMessage(data=data, attributes=attrs) + # Delegate the publishing to the batch. 
- return self.batch(topic).publish(data, *attrs) + return self.batch(topic, message=message).publish(message) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/client.py b/pubsub/google/cloud/pubsub_v1/subscriber/client.py index 410d6ea130fc..0a08bb9f4f2b 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/client.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/client.py @@ -19,7 +19,7 @@ from google.cloud.gapic.pubsub.v1 import subscriber_client from google.cloud.pubsub_v1 import _gapic -from google.cloud.pubsub_v1.subscriber.policy import mp +from google.cloud.pubsub_v1.subscriber.policy import thread __VERSION__ = pkg_resources.get_distribution('google-cloud-pubsub').version @@ -43,14 +43,14 @@ class SubscriberClient(object): :class:`.pubsub_v1.subscriber.policy.base.BasePolicy` class in order to define your own consumer. This is primarily provided to allow use of different concurrency models; the default - is based on :class:`multiprocessing.Process`. + is based on :class:`threading.Thread`. kwargs (dict): Any additional arguments provided are sent as keyword keyword arguments to the underlying :class:`~.gapic.pubsub.v1.subscriber_client.SubscriberClient`. Generally, you should not need to set additional keyword arguments. """ - def __init__(self, flow_control=(), policy_class=mp.Policy, + def __init__(self, flow_control=(), policy_class=thread.Policy, **kwargs): # Add the metrics headers, and instantiate the underlying GAPIC # client. 
diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py b/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py index 2026b30f3f2c..8add5aef23c4 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py @@ -227,6 +227,7 @@ def _blocking_consume(self): response_generator = self._policy.call_rpc(request_generator) try: for response in response_generator: + _LOGGER.debug('Received response: {0}'.format(response)) self._policy.on_response(response) # If the loop above exits without an exception, then the @@ -237,8 +238,8 @@ def _blocking_consume(self): break except KeyboardInterrupt: self.stop_consuming() - except Exception as e: - self._policy.on_exception(e) + except Exception as exc: + self._policy.on_exception(exc) def start_consuming(self): """Start consuming the stream.""" diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/histogram.py b/pubsub/google/cloud/pubsub_v1/subscriber/histogram.py index b5df134260e4..3f8b64ed9f73 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/histogram.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/histogram.py @@ -148,7 +148,7 @@ def percentile(self, percent): for k in reversed(list(self._data.keys())): target -= self._data[k] if target <= 0: - return self._data[k] + return k # The only way to get here is if there was no data. # In this case, just return 10 seconds. diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/message.py b/pubsub/google/cloud/pubsub_v1/subscriber/message.py index 5690fd1c6d0d..e2bf9415f9a8 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/message.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/message.py @@ -129,7 +129,7 @@ def ack(self): time_to_ack = math.ceil(time.time() - self._received_timestamp) self._policy.histogram.add(time_to_ack) self._policy.ack(self._ack_id) - self.release() + self.drop() def drop(self): """Release the message from lease management. 
@@ -181,4 +181,4 @@ def nack(self): This will cause the message to be re-delivered to the subscription. """ self.modify_ack_deadline(seconds=0) - self.release() + self.drop() diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py index caa8c3580313..b740c0829865 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py @@ -15,6 +15,7 @@ from __future__ import absolute_import import abc +import logging import random import time @@ -26,13 +27,15 @@ from google.cloud.pubsub_v1.subscriber import consumer from google.cloud.pubsub_v1.subscriber import histogram +logger = logging.getLogger(__name__) + @six.add_metaclass(abc.ABCMeta) class BasePolicy(object): """Abstract class defining a subscription policy. - Although the :class:`~.pubsub_v1.subscriber.policy.mp.Policy` class, - based on :class:`multiprocessing.Process`, is fine for most cases, + Although the :class:`~.pubsub_v1.subscriber.policy.thread.Policy` class, + based on :class:`threading.Thread`, is fine for most cases, advanced users may need to implement something based on a different concurrency model. @@ -86,7 +89,7 @@ def ack_deadline(self): def initial_request(self): """Return the initial request. - This defines the intiial request that must always be sent to Pub/Sub + This defines the initial request that must always be sent to Pub/Sub immediately upon opening the subscription. """ return types.StreamingPullRequest( @@ -171,11 +174,13 @@ def maintain_leases(self): # This is based off of how long previous messages have taken to ack, # with a sensible default and within the ranges allowed by Pub/Sub. p99 = self.histogram.percentile(99) + logger.debug('The current p99 value is %d seconds.' % p99) # Create a streaming pull request. # We do not actually call `modify_ack_deadline` over and over because # it is more efficient to make a single request. 
ack_ids = list(self.managed_ack_ids) + logger.debug('Renewing lease for %d ack IDs.' % len(ack_ids)) if len(ack_ids) > 0: request = types.StreamingPullRequest( modify_deadline_ack_ids=ack_ids, @@ -189,7 +194,9 @@ def maintain_leases(self): # period between 0 seconds and 90% of the lease. This use of # jitter (http://bit.ly/2s2ekL7) helps decrease contention in cases # where there are many clients. - time.sleep(random.uniform(0.0, p99 * 0.9)) + snooze = random.uniform(0.0, p99 * 0.9) + logger.debug('Snoozing lease management for %f seconds.' % snooze) + time.sleep(snooze) self.maintain_leases() def modify_ack_deadline(self, ack_id, seconds): diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/mp.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py similarity index 75% rename from pubsub/google/cloud/pubsub_v1/subscriber/policy/mp.py rename to pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py index 76129707cdd1..329cba9e3d86 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/mp.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py @@ -15,9 +15,9 @@ from __future__ import absolute_import from concurrent import futures -from multiprocessing import managers +import queue import logging -import multiprocessing +import threading import grpc @@ -29,15 +29,8 @@ logger = logging.getLogger(__name__) -# Allow sets to be able to be run through the managers; ensure they are -# iterable and have add/remove. -managers.SyncManager.register('set', set, - exposed=('__contains__', '__iter__', 'add', 'remove'), -) - - class Policy(base.BasePolicy): - """A consumer class based on :class:``multiprocessing.Process``. + """A consumer class based on :class:``threading.Thread``. This consumer handles the connection to the Pub/Sub service and all of the concurrency needs. @@ -47,19 +40,15 @@ def __init__(self, client, subscription): self._callback = lambda message: None # Create a manager for keeping track of shared state. 
- self._manager = multiprocessing.Manager() - self._shared = self._manager.Namespace(subscription=subscription) - self._managed_ack_ids = self._manager.set() - self._request_queue = self._manager.Queue() + self._managed_ack_ids = set() + self._request_queue = queue.Queue() # Call the superclass constructor. - super(Policy, self).__init__(client, subscription, - histogram_data=self._manager.dict(), - ) + super(Policy, self).__init__(client, subscription) # Also maintain a request queue and an executor. logger.debug('Creating callback requests thread (not starting).') - self._executor = futures.ProcessPoolExecutor() + self._executor = futures.ThreadPoolExecutor() self._callback_requests = helper_threads.QueueCallbackThread( self._request_queue, self.on_callback_request, @@ -67,33 +56,16 @@ def __init__(self, client, subscription): # Spawn a process that maintains all of the leases for this policy. logger.debug('Spawning lease process.') - self._lease_process = multiprocessing.Process( + self._lease_process = threading.Thread( target=self.maintain_leases, ) self._lease_process.daemon = True self._lease_process.start() - @property - def managed_ack_ids(self): - """Return the ack IDs currently being managed by the policy. - - Returns: - set: The set of ack IDs being managed. - """ - return self._managed_ack_ids - - @property - def subscription(self): - """Return the subscription. - - Returns: - str: The subscription - """ - return self._shared.subscription - def close(self): """Close the existing connection.""" self._consumer.helper_threads.stop('callback requests worker') + self._consumer.stop_consuming() def open(self, callback): """Open a streaming pull connection and begin receiving messages. @@ -138,5 +110,6 @@ def on_response(self, response): For each message, schedule a callback with the executor. 
""" for msg in response.received_messages: + logger.debug('New message received from Pub/Sub: %r', msg) message = Message(self, msg.ack_id, msg.message) self._executor.submit(self._callback, message) From 1df0ccfe51dc55d2e3ec7a367c4903f3e5313dd9 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Thu, 13 Jul 2017 12:33:54 -0700 Subject: [PATCH 16/63] WIP --- .../cloud/pubsub_v1/publisher/batch/thread.py | 3 ++- .../cloud/pubsub_v1/publisher/client.py | 4 ++-- .../cloud/pubsub_v1/subscriber/consumer.py | 9 ++++++++- .../cloud/pubsub_v1/subscriber/policy/base.py | 4 ++++ .../pubsub_v1/subscriber/policy/thread.py | 19 +++++++++++-------- 5 files changed, 27 insertions(+), 12 deletions(-) diff --git a/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py b/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py index 95418dc9cada..2744145a848b 100644 --- a/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py +++ b/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py @@ -230,7 +230,8 @@ def publish(self, message): :class:`concurrent.futures.Future` interface. """ # Coerce the type, just in case. - message = types.PubsubMessage(message) + if not isinstance(message, types.PubsubMessage): + message = types.PubsubMessage(message) # Add the size to the running total of the size, so we know # if future messages need to be rejected. diff --git a/pubsub/google/cloud/pubsub_v1/publisher/client.py b/pubsub/google/cloud/pubsub_v1/publisher/client.py index 5ebbceec84e6..7aebdb1c4f9f 100644 --- a/pubsub/google/cloud/pubsub_v1/publisher/client.py +++ b/pubsub/google/cloud/pubsub_v1/publisher/client.py @@ -147,9 +147,9 @@ def publish(self, topic, data, **attrs): # Coerce all attributes to text strings. 
for k, v in copy.copy(attrs).items(): - if isinstance(data, six.text_type): + if isinstance(v, six.text_type): continue - if isinstance(data, six.binary_type): + if isinstance(v, six.binary_type): attrs[k] = v.decode('utf-8') continue raise TypeError('All attributes being published to Pub/Sub must ' diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py b/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py index 8add5aef23c4..5d679dc0099c 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py @@ -178,6 +178,7 @@ def __init__(self, policy): self._request_queue = queue.Queue() self._exiting = threading.Event() + self.active = False self.helper_threads = helper_threads.HelperThreadRegistry() """:cls:`_helper_threads.HelperThreads`: manages the helper threads. The policy may use this to schedule its own helper threads. @@ -239,10 +240,15 @@ def _blocking_consume(self): except KeyboardInterrupt: self.stop_consuming() except Exception as exc: - self._policy.on_exception(exc) + try: + self._policy.on_exception(exc) + except: + self.active = False + raise def start_consuming(self): """Start consuming the stream.""" + self.active = True self._exiting.clear() self.helper_threads.start('consume bidirectional stream', self._request_queue, @@ -251,5 +257,6 @@ def start_consuming(self): def stop_consuming(self): """Signal the stream to stop and block until it completes.""" + self.active = False self._exiting.set() self.helper_threads.stop_all() diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py index b740c0829865..77f24f57f92d 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py @@ -170,6 +170,10 @@ def maintain_leases(self): implementing your own policy, you _should_ call this method in an appropriate form of subprocess. 
""" + # Sanity check: Should this infinitely-recursive loop quit? + if not self._consumer.active: + return + # Determine the appropriate duration for the lease. # This is based off of how long previous messages have taken to ack, # with a sensible default and within the ranges allowed by Pub/Sub. diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py index 329cba9e3d86..acb318006625 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py @@ -54,16 +54,9 @@ def __init__(self, client, subscription): self.on_callback_request, ) - # Spawn a process that maintains all of the leases for this policy. - logger.debug('Spawning lease process.') - self._lease_process = threading.Thread( - target=self.maintain_leases, - ) - self._lease_process.daemon = True - self._lease_process.start() - def close(self): """Close the existing connection.""" + # Close the main subscription connection. self._consumer.helper_threads.stop('callback requests worker') self._consumer.stop_consuming() @@ -77,14 +70,24 @@ def open(self, callback): Args: callback (Callable): The callback function. """ + # Start the thread to pass the requests. logger.debug('Starting callback requests worker.') self._callback = callback self._consumer.helper_threads.start('callback requests worker', self._request_queue, self._callback_requests, ) + + # Actually start consuming messages. self._consumer.start_consuming() + # Spawn a helper thread that maintains all of the leases for + # this policy. 
+ logger.debug('Spawning lease maintenance worker.') + self._leaser = threading.Thread(target=self.maintain_leases) + self._leaser.daemon = True + self._leaser.start() + def on_callback_request(self, callback_request): """Map the callback request to the appropriate GRPC request.""" action, args = callback_request[0], callback_request[1:] From acb4534dda76fe367d080cbfe934a642974246f4 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Thu, 13 Jul 2017 13:25:56 -0700 Subject: [PATCH 17/63] WIP --- pubsub/google/cloud/pubsub_v1/subscriber/consumer.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py b/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py index 5d679dc0099c..a3f237d7fe63 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py @@ -200,6 +200,9 @@ def _request_generator_thread(self): """ # First, yield the initial request. This occurs on every new # connection, fundamentally including a resumed connection. + _LOGGER.debug('Sending initial request: {initial_request}'.format( + initial_request=self._policy.initial_request, + )) yield self._policy.initial_request # Now yield each of the items on the request queue, and block if there From 2fb27855748d21f58e94501deb2f5341b2bc8db4 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Thu, 13 Jul 2017 14:34:22 -0700 Subject: [PATCH 18/63] Update subscriber client config to be sane. 
--- .../gapic/pubsub/v1/subscriber_client_config.json | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/pubsub/google/cloud/gapic/pubsub/v1/subscriber_client_config.json b/pubsub/google/cloud/gapic/pubsub/v1/subscriber_client_config.json index 4b31158fbac8..6180cc0a941f 100644 --- a/pubsub/google/cloud/gapic/pubsub/v1/subscriber_client_config.json +++ b/pubsub/google/cloud/gapic/pubsub/v1/subscriber_client_config.json @@ -35,6 +35,15 @@ "rpc_timeout_multiplier": 1.0, "max_rpc_timeout_millis": 12000, "total_timeout_millis": 600000 + }, + "streaming": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 900000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 900000, + "total_timeout_millis": 900000 } }, "methods": { @@ -79,9 +88,9 @@ "retry_params_name": "messaging" }, "StreamingPull": { - "timeout_millis": 60000, + "timeout_millis": 900000, "retry_codes_name": "pull", - "retry_params_name": "messaging" + "retry_params_name": "streaming" }, "ModifyPushConfig": { "timeout_millis": 60000, From ef178e9de342cd839d07ebf0b1bc8edaa89335ff Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Tue, 18 Jul 2017 13:59:02 -0700 Subject: [PATCH 19/63] Start adding unit tests. 
--- pubsub/google/cloud/pubsub_v1/__init__.py | 4 +- .../cloud/pubsub_v1/publisher/__init__.py | 6 +- .../cloud/pubsub_v1/publisher/batch/thread.py | 2 - .../cloud/pubsub_v1/publisher/client.py | 8 +- .../cloud/pubsub_v1/subscriber/__init__.py | 4 +- .../cloud/pubsub_v1/subscriber/client.py | 2 +- pubsub/google/cloud/pubsub_v1/types.py | 2 +- pubsub/tests/unit/__init__.py | 0 pubsub/tests/unit/pubsub_v1/__init__.py | 0 .../unit/pubsub_v1/publisher/test_client.py | 123 ++++++++++++++++++ 10 files changed, 137 insertions(+), 14 deletions(-) create mode 100644 pubsub/tests/unit/__init__.py create mode 100644 pubsub/tests/unit/pubsub_v1/__init__.py create mode 100644 pubsub/tests/unit/pubsub_v1/publisher/test_client.py diff --git a/pubsub/google/cloud/pubsub_v1/__init__.py b/pubsub/google/cloud/pubsub_v1/__init__.py index 7e785dc9dc7a..21706f6eee5e 100644 --- a/pubsub/google/cloud/pubsub_v1/__init__.py +++ b/pubsub/google/cloud/pubsub_v1/__init__.py @@ -15,8 +15,8 @@ from __future__ import absolute_import from google.cloud.pubsub_v1 import types -from google.cloud.pubsub_v1.publisher import PublisherClient -from google.cloud.pubsub_v1.subscriber import SubscriberClient +from google.cloud.pubsub_v1.publisher import Client as PublisherClient +from google.cloud.pubsub_v1.subscriber import Client as SubscriberClient __all__ = ( 'PublisherClient', diff --git a/pubsub/google/cloud/pubsub_v1/publisher/__init__.py b/pubsub/google/cloud/pubsub_v1/publisher/__init__.py index 60496983b352..76d54649448f 100644 --- a/pubsub/google/cloud/pubsub_v1/publisher/__init__.py +++ b/pubsub/google/cloud/pubsub_v1/publisher/__init__.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from google.cloud.pubsub_v1.publisher.client import PublisherClient +from __future__ import absolute_import + +from google.cloud.pubsub_v1.publisher.client import Client __all__ = ( - 'PublisherClient', + 'Client', ) diff --git a/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py b/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py index 2744145a848b..b963ec1b8370 100644 --- a/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py +++ b/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py @@ -173,8 +173,6 @@ def _commit(self): self.messages, ) - # FIXME (lukesneeringer): Check for failures; retry. - # We got a response from Pub/Sub; denote that we are processing. self._status = 'processing results' diff --git a/pubsub/google/cloud/pubsub_v1/publisher/client.py b/pubsub/google/cloud/pubsub_v1/publisher/client.py index 7aebdb1c4f9f..c5b56063a8a2 100644 --- a/pubsub/google/cloud/pubsub_v1/publisher/client.py +++ b/pubsub/google/cloud/pubsub_v1/publisher/client.py @@ -31,7 +31,7 @@ @_gapic.add_methods(publisher_client.PublisherClient, blacklist=('publish',)) -class PublisherClient(object): +class Client(object): """A publisher client for Google Cloud Pub/Sub. This creates an object that is capable of publishing messages. @@ -70,7 +70,7 @@ def __init__(self, batch_settings=(), batch_class=thread.Batch, **kwargs): self._rejection = base.RejectionBatch() def batch(self, topic, message, create=True, autocommit=True): - """Return the current batch. + """Return the current batch for the provided topic. This will create a new batch only if no batch currently exists. @@ -84,7 +84,7 @@ def batch(self, topic, message, create=True, autocommit=True): This is primarily useful for debugging. Returns: - :class:~`pubsub_v1.batch.Batch` The batch object. + ~.pubsub_v1.batch.Batch: The batch object. """ # If there is no matching batch yet, then potentially create one # and place it on the batches dictionary. 
@@ -136,7 +136,7 @@ def publish(self, topic, data, **attrs): sent as metadata. (These may be text strings or byte strings.) Returns: - :class:`~.pubsub_v1.publisher.futures.Future`: An object conforming + ~.pubsub_v1.publisher.futures.Future: An object conforming to the ``concurrent.futures.Future`` interface. """ # Sanity check: Is the data being sent as a bytestring? diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/__init__.py b/pubsub/google/cloud/pubsub_v1/subscriber/__init__.py index ee2aaca57ef0..d98a7bb75be4 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/__init__.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/__init__.py @@ -14,9 +14,9 @@ from __future__ import absolute_import -from google.cloud.pubsub_v1.subscriber.client import SubscriberClient +from google.cloud.pubsub_v1.subscriber.client import Client __all__ = ( - 'SubscriberClient', + 'Client', ) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/client.py b/pubsub/google/cloud/pubsub_v1/subscriber/client.py index 0a08bb9f4f2b..58fa66881ba7 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/client.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/client.py @@ -27,7 +27,7 @@ @_gapic.add_methods(subscriber_client.SubscriberClient, blacklist=('pull', 'streaming_pull')) -class SubscriberClient(object): +class Client(object): """A subscriber client for Google Cloud Pub/Sub. This creates an object that is capable of subscribing to messages. 
diff --git a/pubsub/google/cloud/pubsub_v1/types.py b/pubsub/google/cloud/pubsub_v1/types.py index 778391f1c21f..f7ab43d1ea4e 100644 --- a/pubsub/google/cloud/pubsub_v1/types.py +++ b/pubsub/google/cloud/pubsub_v1/types.py @@ -33,7 +33,7 @@ ) BatchSettings.__new__.__defaults__ = ( 1024 * 1024 * 5, # max_bytes: 5 MB - 0.25, # max_latency: 0.25 seconds + 1.0, # max_latency: 1.0 seconds 1000, # max_messages: 1,000 ) diff --git a/pubsub/tests/unit/__init__.py b/pubsub/tests/unit/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/pubsub/tests/unit/pubsub_v1/__init__.py b/pubsub/tests/unit/pubsub_v1/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/pubsub/tests/unit/pubsub_v1/publisher/test_client.py b/pubsub/tests/unit/pubsub_v1/publisher/test_client.py new file mode 100644 index 000000000000..3f9374a20fd6 --- /dev/null +++ b/pubsub/tests/unit/pubsub_v1/publisher/test_client.py @@ -0,0 +1,123 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock + +import pytest + +from google.cloud.gapic.pubsub.v1 import publisher_client + +from google.cloud.pubsub_v1 import publisher +from google.cloud.pubsub_v1 import types + + +def test_init(): + client = publisher.Client() + + # A plain client should have an `api` (the underlying GAPIC) and a + # batch settings object, which should have the defaults. 
+ assert isinstance(client.api, publisher_client.PublisherClient) + assert client.batch_settings.max_bytes == 5 * (2 ** 20) + assert client.batch_settings.max_latency == 1.0 + assert client.batch_settings.max_messages == 1000 + + +def test_batch_accepting(): + """Establish that an existing batch is returned if it accepts messages.""" + client = publisher.Client() + message = types.PubsubMessage(data=b'foo') + + # At first, there are no batches, so this should return a new batch + # which is also saved to the object. + ante = len(client._batches) + batch = client.batch('topic_name', message, autocommit=False) + assert len(client._batches) == ante + 1 + assert batch is client._batches['topic_name'] + + # A subsequent request should return the same batch. + batch2 = client.batch('topic_name', message, autocommit=False) + assert batch is batch2 + assert batch2 is client._batches['topic_name'] + + +def test_batch_without_autocreate(): + client = publisher.Client() + message = types.PubsubMessage(data=b'foo') + + # If `create=False` is sent, then when the batch is not found, None + # is returned instead. + ante = len(client._batches) + batch = client.batch('topic_name', message, create=False) + assert batch is None + assert len(client._batches) == ante + + +def test_publish(): + client = publisher.Client() + + # Use a mock in lieu of the actual batch class; set the mock up to claim + # indiscriminately that it accepts all messages. + batch = mock.Mock(spec=client._batch_class) + batch.will_accept.return_value = True + client._batches['topic_name'] = batch + + # Begin publishing. + client.publish('topic_name', b'spam') + client.publish('topic_name', b'foo', bar='baz') + + # The batch's publish method should have been called twice. + assert batch.publish.call_count == 2 + + # In both cases + # The first call should correspond to the first message. 
+ _, args, _ = batch.publish.mock_calls[0] + assert args[0].data == b'spam' + assert not args[0].attributes + + # The second call should correspond to the second message. + _, args, _ = batch.publish.mock_calls[1] + assert args[0].data == b'foo' + assert args[0].attributes == {u'bar': u'baz'} + + +def test_publish_data_not_bytestring_error(): + client = publisher.Client() + with pytest.raises(TypeError): + client.publish(u'This is a text string.') + with pytest.raises(TypeError): + client.publish(42) + + +def test_publish_attrs_bytestring(): + client = publisher.Client() + + # Use a mock in lieu of the actual batch class; set the mock up to claim + # indiscriminately that it accepts all messages. + batch = mock.Mock(spec=client._batch_class) + batch.will_accept.return_value = True + client._batches['topic_name'] = batch + + # Begin publishing. + client.publish('topic_name', b'foo', bar=b'baz') + + # The attributes should have been sent as text. + _, args, _ = batch.publish.mock_calls[0] + assert args[0].data == b'foo' + assert args[0].attributes == {u'bar': u'baz'} + + +def test_publish_attrs_type_error(): + client = publisher.Client() + with pytest.raises(TypeError): + client.publish(b'foo', answer=42) From 147ad18d9c9318dbf71458460dc46db79a464e2c Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Wed, 19 Jul 2017 12:13:14 -0700 Subject: [PATCH 20/63] Beginning work on unit tests. 
--- .../cloud/pubsub_v1/publisher/batch/thread.py | 45 ++-- .../pubsub_v1/publisher/batch/test_thread.py | 197 ++++++++++++++++++ .../publisher/batch/test_thread_future.py | 112 ++++++++++ .../unit/pubsub_v1/publisher/test_client.py | 6 +- 4 files changed, 339 insertions(+), 21 deletions(-) create mode 100644 pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread.py create mode 100644 pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread_future.py diff --git a/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py b/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py index b963ec1b8370..37664ef5ffec 100644 --- a/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py +++ b/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py @@ -60,16 +60,15 @@ class Batch(base.BaseBatch): def __init__(self, client, topic, settings, autocommit=True): self._client = client - # Create a namespace that is owned by the client manager; this - # is necessary to be able to have these values be communicable between - # processes. + # These objects are all communicated between threads; ensure that + # any writes to them are atomic. self._futures = [] self._messages = [] self._size = 0 - self._message_ids = {} self._settings = settings self._status = self.Status.ACCEPTING_MESSAGES self._topic = topic + self.message_ids = {} # This is purely internal tracking. self._thread = None @@ -167,8 +166,8 @@ def _commit(self): # Begin the request to publish these messages. if len(self._messages) == 0: - raise Exception('Empty queue') - response = self._client.api.publish( + return + response = self.client.api.publish( self._topic, self.messages, ) @@ -188,7 +187,7 @@ def _commit(self): # if not. 
self._status = self.Status.SUCCESS for message_id, fut in zip(response.message_ids, self._futures): - self._message_ids[hash(fut)] = message_id + self.message_ids[hash(fut)] = message_id fut._trigger() def monitor(self): @@ -229,7 +228,7 @@ def publish(self, message): """ # Coerce the type, just in case. if not isinstance(message, types.PubsubMessage): - message = types.PubsubMessage(message) + message = types.PubsubMessage(**message) # Add the size to the running total of the size, so we know # if future messages need to be rejected. @@ -255,7 +254,7 @@ class Future(object): methods in this library. Args: - batch (:class:`~.Batch`): The batch object that is committing + batch (`~.Batch`): The batch object that is committing this message. """ def __init__(self, batch): @@ -290,10 +289,13 @@ def running(self): def done(self): """Return True if the publish has completed, False otherwise. - This still returns True in failure cases; checking `result` or - `exception` is the canonical way to assess success or failure. + This still returns True in failure cases; checking :meth:`result` or + :meth:`exception` is the canonical way to assess success or failure. """ - return self._batch.status in ('success', 'error') + return self._batch.status in ( + self._batch.Status.SUCCESS, + self._batch.Status.ERROR, + ) def result(self, timeout=None): """Return the message ID, or raise an exception. @@ -305,9 +307,12 @@ def result(self, timeout=None): timeout (int|float): The number of seconds before this call times out and raises TimeoutError. + Returns: + str: The message ID. + Raises: - :class:~`pubsub_v1.TimeoutError`: If the request times out. - :class:~`Exception`: For undefined exceptions in the underlying + ~.pubsub_v1.TimeoutError: If the request times out. + Exception: For undefined exceptions in the underlying call execution. """ # Attempt to get the exception if there is one. 
@@ -329,11 +334,15 @@ def exception(self, timeout=None, _wait=1): times out and raises TimeoutError. Raises: - :exc:`TimeoutError`: If the request times out. + TimeoutError: If the request times out. Returns: - :class:`Exception`: The exception raised by the call, if any. + Exception: The exception raised by the call, if any. """ + # If no timeout was specified, use inf. + if timeout is None: + timeout = float('inf') + # If the batch completed successfully, this should return None. if self._batch.status == 'success': return None @@ -343,14 +352,14 @@ def exception(self, timeout=None, _wait=1): return self._batch.error # If the timeout has been exceeded, raise TimeoutError. - if timeout and timeout < 0: + if timeout <= 0: raise exceptions.TimeoutError('Timed out waiting for exception.') # Wait a little while and try again. time.sleep(_wait) return self.exception( timeout=timeout - _wait, - _wait=min(_wait * 2, 60), + _wait=min(_wait * 2, timeout, 60), ) def add_done_callback(self, fn): diff --git a/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread.py b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread.py new file mode 100644 index 000000000000..bbbc9890a8b1 --- /dev/null +++ b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread.py @@ -0,0 +1,197 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import threading +import time + +import mock + +import pytest + +from google.cloud.pubsub_v1 import publisher +from google.cloud.pubsub_v1 import types +from google.cloud.pubsub_v1.publisher import exceptions +from google.cloud.pubsub_v1.publisher.batch.thread import Batch + + +def create_batch(autocommit=False, **batch_settings): + """Return a batch object suitable for testing. + + Args: + autocommit (bool): Whether the batch should commit after + ``max_latency`` seconds. By default, this is ``False`` + for unit testing. + kwargs (dict): Arguments passed on to the + :class:``~.pubsub_v1.types.BatchSettings`` constructor. + + Returns: + ~.pubsub_v1.publisher.batch.thread.Batch: A batch object. + """ + client = publisher.Client() + settings = types.BatchSettings(**batch_settings) + return Batch(client, 'topic_name', settings, autocommit=autocommit) + + +def test_init(): + """Establish that a monitor thread is usually created on init.""" + client = publisher.Client() + + # Do not actually create a thread, but do verify that one was created; + # it should be running the batch's "monitor" method (which commits the + # batch once time elapses). + with mock.patch.object(threading, 'Thread', autospec=True) as Thread: + batch = Batch(client, 'topic_name', types.BatchSettings()) + Thread.assert_called_once_with(target=batch.monitor) + + # New batches start able to accept messages by default. + assert batch.status == batch.Status.ACCEPTING_MESSAGES + + +def test_init_infinite_latency(): + batch = create_batch(max_latency=float('inf')) + assert batch._thread is None + + +def test_client(): + client = publisher.Client() + settings = types.BatchSettings() + batch = Batch(client, 'topic_name', settings, autocommit=False) + assert batch.client is client + + +def test_commit(): + batch = create_batch() + with mock.patch.object(threading, 'Thread', autospec=True) as Thread: + batch.commit() + + # A thread should have been created to do the actual commit. 
+ Thread.assert_called_once_with(target=batch._commit) + Thread.return_value.start.assert_called_once_with() + + # The batch's status needs to be something other than "accepting messages", + # since the commit started. + assert batch.status != batch.Status.ACCEPTING_MESSAGES + + +def test_blocking_commit(): + batch = create_batch() + futures = ( + batch.publish({'data': b'This is my message.'}), + batch.publish({'data': b'This is another message.'}), + ) + + # Set up the underlying API publish method to return a PublishResponse. + with mock.patch.object(type(batch.client.api), 'publish') as publish: + publish.return_value = types.PublishResponse(message_ids=['a', 'b']) + + # Actually commit the batch. + batch._commit() + + # Establish that the underlying API call was made with expected + # arguments. + publish.assert_called_once_with('topic_name', [ + types.PubsubMessage(data=b'This is my message.'), + types.PubsubMessage(data=b'This is another message.'), + ]) + + # Establish that all of the futures are done, and that they have the + # expected values. + assert all([f.done() for f in futures]) + assert futures[0].result() == 'a' + assert futures[1].result() == 'b' + + +def test_blocking_commit_no_messages(): + batch = create_batch() + with mock.patch.object(type(batch.client.api), 'publish') as publish: + batch._commit() + assert publish.call_count == 0 + + +def test_blocking_commit_wrong_messageid_length(): + batch = create_batch() + batch.publish({'data': b'blah blah blah'}) + batch.publish({'data': b'blah blah blah blah'}) + + # Set up a PublishResponse that only returns one message ID. 
+ with mock.patch.object(type(batch.client.api), 'publish') as publish: + publish.return_value = types.PublishResponse(message_ids=['a']) + with pytest.raises(exceptions.PublishError): + batch._commit() + + +def test_monitor(): + batch = create_batch(max_latency=5.0) + with mock.patch.object(time, 'sleep') as sleep: + with mock.patch.object(type(batch), '_commit') as _commit: + batch.monitor() + + # The monitor should have waited the given latency. + sleep.assert_called_once_with(5.0) + + # Since `monitor` runs in its own thread, it should call + # the blocking commit implementation. + _commit.assert_called_once_with() + + +def test_monitor_already_committed(): + batch = create_batch(max_latency=5.0) + batch._status = 'something else' + with mock.patch.object(time, 'sleep') as sleep: + with mock.patch.object(type(batch), '_commit') as _commit: + batch.monitor() + + # The monitor should have waited the given latency. + sleep.assert_called_once_with(5.0) + + # Since the batch was no longer accepting messages, the + # commit function should *not* have been called. + assert _commit.call_count == 0 + + +def test_publish(): + batch = create_batch() + messages = ( + types.PubsubMessage(data=b'foobarbaz'), + types.PubsubMessage(data=b'spameggs'), + types.PubsubMessage(data=b'1335020400'), + ) + + # Publish each of the messages, which should save them to the batch. + for message in messages: + batch.publish(message) + + # There should be three messages on the batch, and three futures. + assert len(batch.messages) == 3 + assert len(batch._futures) == 3 + + # The size should have been incremented by the sum of the size of the + # messages. + assert batch.size == sum([m.ByteSize() for m in messages]) + assert batch.size > 0 # I do not always trust protobuf. + + +def test_publish_dict(): + batch = create_batch() + batch.publish({'data': b'foobarbaz', 'attributes': {'spam': 'eggs'}}) + + # There should be one message on the batch. 
+ assert len(batch.messages) == 1 + + # It should be an actual protobuf Message at this point, with the + # expected values. + message = batch.messages[0] + assert isinstance(message, types.PubsubMessage) + assert message.data == b'foobarbaz' + assert message.attributes == {'spam': 'eggs'} diff --git a/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread_future.py b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread_future.py new file mode 100644 index 000000000000..2b2cb26e1303 --- /dev/null +++ b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread_future.py @@ -0,0 +1,112 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import time + +import mock + +import pytest + +from google.cloud.pubsub_v1 import publisher +from google.cloud.pubsub_v1 import types +from google.cloud.pubsub_v1.publisher import exceptions +from google.cloud.pubsub_v1.publisher.batch.thread import Batch +from google.cloud.pubsub_v1.publisher.batch.thread import Future + + +def create_batch(status=None): + """Create a batch object, which does not commit. + + Args: + status (str): If provided, the batch's internal status will be set + to the provided status. 
+ + Returns: + ~.pubsub_v1.publisher.batch.thread.Batch: The batch object + """ + client = publisher.Client() + batch_settings = types.BatchSettings() + batch = Batch(client, 'topic_name', batch_settings, autocommit=False) + if status: + batch._status = status + return batch + + +def create_future(batch=None): + """Create a Future object to test. + + Args: + ~.pubsub_v1.publisher.batch.thread.Batch: A batch object, such + as one returned from :meth:`create_batch`. If none is provided, + a batch will be automatically created. + + Returns: + ~.pubsub_v1.publisher.batch.thread.Future: The Future object (the + class being tested in this module). + """ + if batch is None: + batch = create_batch() + return Future(batch=batch) + + +def test_cancel(): + assert create_future().cancel() is False + + +def test_cancelled(): + assert create_future().cancelled() is False + + +def test_running(): + assert create_future().running() is True + + +def test_done(): + batch = create_batch() + future = create_future(batch=batch) + assert future.done() is False + batch._status = batch.Status.SUCCESS + assert future._batch.status == 'success' + assert future.done() is True + + +def test_exception_no_error(): + batch = create_batch(status='success') + future = create_future(batch=batch) + assert future.exception() is None + + +def test_exception_with_error(): + batch = create_batch(status='error') + batch.error = RuntimeError('Something really bad happened.') + future = create_future(batch=batch) + + # Make sure that the exception that is returned is the batch's error. + # Also check the type to ensure the batch's error did not somehow + # change internally. 
+ assert future.exception() is batch.error + assert isinstance(future.exception(), RuntimeError) + + +def test_exception_timeout(): + future = create_future() + with mock.patch.object(time, 'sleep') as sleep: + with pytest.raises(exceptions.TimeoutError): + future.exception(timeout=10) + + # The sleep should have been called with 1, 2, 4, then 3 seconds + # (the first three due to linear backoff, then the last one because + # only three seconds were left before the timeout was to be hit). + assert sleep.call_count == 4 + assert sleep.mock_calls[0] diff --git a/pubsub/tests/unit/pubsub_v1/publisher/test_client.py b/pubsub/tests/unit/pubsub_v1/publisher/test_client.py index 3f9374a20fd6..6d5f653a46f4 100644 --- a/pubsub/tests/unit/pubsub_v1/publisher/test_client.py +++ b/pubsub/tests/unit/pubsub_v1/publisher/test_client.py @@ -94,9 +94,9 @@ def test_publish(): def test_publish_data_not_bytestring_error(): client = publisher.Client() with pytest.raises(TypeError): - client.publish(u'This is a text string.') + client.publish('topic_name', u'This is a text string.') with pytest.raises(TypeError): - client.publish(42) + client.publish('topic_name', 42) def test_publish_attrs_bytestring(): @@ -120,4 +120,4 @@ def test_publish_attrs_bytestring(): def test_publish_attrs_type_error(): client = publisher.Client() with pytest.raises(TypeError): - client.publish(b'foo', answer=42) + client.publish('topic_name', b'foo', answer=42) From 9c701e320660ab96ac7e1e3b9ada6310df639ef0 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Tue, 25 Jul 2017 09:02:43 -0700 Subject: [PATCH 21/63] Publisher tests complete. 
--- pubsub/.coveragerc | 6 ++ .../pubsub_v1/publisher/batch/test_base.py | 63 +++++++++++++++++++ .../publisher/batch/test_thread_future.py | 41 ++++++++++++ .../unit/pubsub_v1/publisher/test_client.py | 15 +++++ 4 files changed, 125 insertions(+) create mode 100644 pubsub/tests/unit/pubsub_v1/publisher/batch/test_base.py diff --git a/pubsub/.coveragerc b/pubsub/.coveragerc index a54b99aa14b7..588fc38a3c2d 100644 --- a/pubsub/.coveragerc +++ b/pubsub/.coveragerc @@ -1,5 +1,9 @@ [run] branch = True +source = + google.cloud.pubsub + google.cloud.pubsub_v1 + tests.unit [report] fail_under = 100 @@ -9,3 +13,5 @@ exclude_lines = pragma: NO COVER # Ignore debug-only repr def __repr__ + # Ignore abstract methods + raise NotImplementedError diff --git a/pubsub/tests/unit/pubsub_v1/publisher/batch/test_base.py b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_base.py new file mode 100644 index 000000000000..17203e129922 --- /dev/null +++ b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_base.py @@ -0,0 +1,63 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import + +from google.cloud.pubsub_v1 import publisher +from google.cloud.pubsub_v1 import types +from google.cloud.pubsub_v1.publisher.batch.thread import Batch + + +def create_batch(status=None, settings=types.BatchSettings()): + """Create a batch object, which does not commit. 
+ + Args: + status (str): If provided, the batch's internal status will be set + to the provided status. + + Returns: + ~.pubsub_v1.publisher.batch.thread.Batch: The batch object + """ + client = publisher.Client() + batch = Batch(client, 'topic_name', settings, autocommit=False) + if status: + batch._status = status + return batch + + +def test_len(): + batch = create_batch(status=Batch.Status.ACCEPTING_MESSAGES) + assert len(batch) == 0 + batch.publish(types.PubsubMessage(data=b'foo')) + assert len(batch) == 1 + +def test_will_accept(): + batch = create_batch(status=Batch.Status.ACCEPTING_MESSAGES) + message = types.PubsubMessage() + assert batch.will_accept(message) is True + + +def test_will_not_accept_status(): + batch = create_batch(status='talk to the hand') + message = types.PubsubMessage() + assert batch.will_accept(message) is False + + +def test_will_not_accept_size(): + batch = create_batch( + settings=types.BatchSettings(max_bytes=10), + status=Batch.Status.ACCEPTING_MESSAGES, + ) + message = types.PubsubMessage(data=b'abcdefghijklmnopqrstuvwxyz') + assert batch.will_accept(message) is False diff --git a/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread_future.py b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread_future.py index 2b2cb26e1303..89661947ddee 100644 --- a/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread_future.py +++ b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread_future.py @@ -110,3 +110,44 @@ def test_exception_timeout(): # only three seconds were left before the timeout was to be hit). 
assert sleep.call_count == 4 assert sleep.mock_calls[0] + + +def test_result_no_error(): + batch = create_batch(status='success') + future = create_future(batch=batch) + batch.message_ids[hash(future)] = '42' + assert future.result() == '42' + + +def test_result_with_error(): + batch = create_batch(status='error') + batch.error = RuntimeError('Something really bad happened.') + future = create_future(batch=batch) + with pytest.raises(RuntimeError): + future.result() + + +def test_add_done_callback_pending_batch(): + future = create_future() + callback = mock.Mock() + future.add_done_callback(callback) + assert len(future._callbacks) == 1 + assert callback in future._callbacks + assert callback.call_count == 0 + + +def test_add_done_callback_completed_batch(): + batch = create_batch(status='success') + future = create_future(batch=batch) + callback = mock.Mock() + future.add_done_callback(callback) + callback.assert_called_once_with(future) + + +def test_trigger(): + future = create_future() + callback = mock.Mock() + future.add_done_callback(callback) + assert callback.call_count == 0 + future._trigger() + callback.assert_called_once_with(future) diff --git a/pubsub/tests/unit/pubsub_v1/publisher/test_client.py b/pubsub/tests/unit/pubsub_v1/publisher/test_client.py index 6d5f653a46f4..6ee66d636578 100644 --- a/pubsub/tests/unit/pubsub_v1/publisher/test_client.py +++ b/pubsub/tests/unit/pubsub_v1/publisher/test_client.py @@ -121,3 +121,18 @@ def test_publish_attrs_type_error(): client = publisher.Client() with pytest.raises(TypeError): client.publish('topic_name', b'foo', answer=42) + + +def test_gapic_instance_method(): + client = publisher.Client() + with mock.patch.object(client.api, '_create_topic', autospec=True) as ct: + client.create_topic('projects/foo/topics/bar') + assert ct.call_count == 1 + _, args, _ = ct.mock_calls[0] + assert args[0] == types.Topic(name='projects/foo/topics/bar') + + +def test_gapic_class_method(): + client = publisher.Client() + 
answer = client.topic_path('foo', 'bar') + assert answer == 'projects/foo/topics/bar' From de38b839d8d7d2ba057b7d218a9cc1ddd1af8d44 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Wed, 26 Jul 2017 07:19:14 -0700 Subject: [PATCH 22/63] subscriber/client.py tests --- pubsub/tests/unit/pubsub_v1/__init__.py | 0 .../pubsub_v1/publisher/batch/test_base.py | 1 + .../publisher/batch/test_thread_future.py | 4 +- ...est_client.py => test_publisher_client.py} | 0 .../pubsub_v1/subscriber/test_consumer.py | 13 +++++++ .../subscriber/test_subscriber_client.py | 38 +++++++++++++++++++ 6 files changed, 54 insertions(+), 2 deletions(-) delete mode 100644 pubsub/tests/unit/pubsub_v1/__init__.py rename pubsub/tests/unit/pubsub_v1/publisher/{test_client.py => test_publisher_client.py} (100%) create mode 100644 pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py create mode 100644 pubsub/tests/unit/pubsub_v1/subscriber/test_subscriber_client.py diff --git a/pubsub/tests/unit/pubsub_v1/__init__.py b/pubsub/tests/unit/pubsub_v1/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/pubsub/tests/unit/pubsub_v1/publisher/batch/test_base.py b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_base.py index 17203e129922..5210d2e62b58 100644 --- a/pubsub/tests/unit/pubsub_v1/publisher/batch/test_base.py +++ b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_base.py @@ -42,6 +42,7 @@ def test_len(): batch.publish(types.PubsubMessage(data=b'foo')) assert len(batch) == 1 + def test_will_accept(): batch = create_batch(status=Batch.Status.ACCEPTING_MESSAGES) message = types.PubsubMessage() diff --git a/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread_future.py b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread_future.py index 89661947ddee..ee4014ee3691 100644 --- a/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread_future.py +++ b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread_future.py @@ -139,14 +139,14 @@ def 
test_add_done_callback_pending_batch(): def test_add_done_callback_completed_batch(): batch = create_batch(status='success') future = create_future(batch=batch) - callback = mock.Mock() + callback = mock.Mock(spec=()) future.add_done_callback(callback) callback.assert_called_once_with(future) def test_trigger(): future = create_future() - callback = mock.Mock() + callback = mock.Mock(spec=()) future.add_done_callback(callback) assert callback.call_count == 0 future._trigger() diff --git a/pubsub/tests/unit/pubsub_v1/publisher/test_client.py b/pubsub/tests/unit/pubsub_v1/publisher/test_publisher_client.py similarity index 100% rename from pubsub/tests/unit/pubsub_v1/publisher/test_client.py rename to pubsub/tests/unit/pubsub_v1/publisher/test_publisher_client.py diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py new file mode 100644 index 000000000000..4fc99a9082dc --- /dev/null +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py @@ -0,0 +1,13 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_subscriber_client.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_subscriber_client.py new file mode 100644 index 000000000000..77f8b016abb6 --- /dev/null +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_subscriber_client.py @@ -0,0 +1,38 @@ +# Copyright 2017, Google Inc. 
All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock + +from google.cloud.pubsub_v1 import subscriber +from google.cloud.pubsub_v1.subscriber.policy import thread + + +def test_init(): + client = subscriber.Client() + assert client._policy_class is thread.Policy + + +def test_subscribe(): + client = subscriber.Client() + subscription = client.subscribe('sub_name') + assert isinstance(subscription, thread.Policy) + + +def test_subscribe_with_callback(): + client = subscriber.Client() + callback = mock.Mock() + with mock.patch.object(thread.Policy, 'open') as open_: + subscription = client.subscribe('sub_name', callback) + open_.assert_called_once_with(callback) + assert isinstance(subscription, thread.Policy) From faeaa8edcc56a011b9398992e9943597c2c35243 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Wed, 26 Jul 2017 08:53:34 -0700 Subject: [PATCH 23/63] Consumer tests --- .../cloud/pubsub_v1/subscriber/exceptions.py | 19 ---- .../pubsub_v1/subscriber/test_consumer.py | 103 ++++++++++++++++++ 2 files changed, 103 insertions(+), 19 deletions(-) delete mode 100644 pubsub/google/cloud/pubsub_v1/subscriber/exceptions.py diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/exceptions.py b/pubsub/google/cloud/pubsub_v1/subscriber/exceptions.py deleted file mode 100644 index 43a659974c23..000000000000 --- a/pubsub/google/cloud/pubsub_v1/subscriber/exceptions.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2017, Google Inc. 
All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import - - -class AlreadyOpen(RuntimeError): - pass diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py index 4fc99a9082dc..24d60a627989 100644 --- a/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py @@ -11,3 +11,106 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ +import queue + +import mock + +import pytest + +from google.cloud.pubsub_v1 import subscriber +from google.cloud.pubsub_v1 import types +from google.cloud.pubsub_v1.subscriber import consumer +from google.cloud.pubsub_v1.subscriber import helper_threads +from google.cloud.pubsub_v1.subscriber.policy import thread + + +def create_consumer(): + client = subscriber.Client() + subscription = client.subscribe('sub_name') + return consumer.Consumer(policy=subscription) + + +def test_send_request(): + consumer = create_consumer() + request = types.StreamingPullRequest(subscription='foo') + with mock.patch.object(queue.Queue, 'put') as put: + consumer.send_request(request) + put.assert_called_once_with(request) + + +def test_request_generator_thread(): + consumer = create_consumer() + generator = consumer._request_generator_thread() + + # The first request that comes from the request generator thread + # should always be the initial request. + initial_request = next(generator) + assert initial_request.subscription == 'sub_name' + assert initial_request.stream_ack_deadline_seconds == 10 + + # Subsequent requests correspond to items placed in the request queue. + consumer.send_request(types.StreamingPullRequest(ack_ids=['i'])) + request = next(generator) + assert request.ack_ids == ['i'] + + # The poison pill should stop the loop. + consumer.send_request(helper_threads.STOP) + with pytest.raises(StopIteration): + next(generator) + + +def test_blocking_consume(): + consumer = create_consumer() + Policy = type(consumer._policy) + + # Establish that we get responses until we run out of them. 
+ with mock.patch.object(Policy, 'call_rpc', autospec=True) as call_rpc: + call_rpc.return_value = (mock.sentinel.A, mock.sentinel.B) + with mock.patch.object(Policy, 'on_response', autospec=True) as on_res: + consumer._blocking_consume() + assert on_res.call_count == 2 + assert on_res.mock_calls[0][1][1] == mock.sentinel.A + assert on_res.mock_calls[1][1][1] == mock.sentinel.B + + +def test_blocking_consume_keyboard_interrupt(): + consumer = create_consumer() + Policy = type(consumer._policy) + + # Establish that we get responses until we are sent the exiting event. + with mock.patch.object(Policy, 'call_rpc', autospec=True) as call_rpc: + call_rpc.return_value = (mock.sentinel.A, mock.sentinel.B) + with mock.patch.object(Policy, 'on_response', autospec=True) as on_res: + on_res.side_effect = KeyboardInterrupt + consumer._blocking_consume() + on_res.assert_called_once_with(consumer._policy, mock.sentinel.A) + + +@mock.patch.object(thread.Policy, 'call_rpc', autospec=True) +@mock.patch.object(thread.Policy, 'on_response', autospec=True) +@mock.patch.object(thread.Policy, 'on_exception', autospec=True) +def test_blocking_consume_exception_reraise(on_exc, on_res, call_rpc): + consumer = create_consumer() + Policy = type(consumer._policy) + + # Establish that we get responses until we are sent the exiting event. 
+ call_rpc.return_value = (mock.sentinel.A, mock.sentinel.B) + on_res.side_effect = TypeError('Bad things!') + on_exc.side_effect = on_res.side_effect + with pytest.raises(TypeError): + consumer._blocking_consume() + + +def test_start_consuming(): + consumer = create_consumer() + helper_threads = consumer.helper_threads + with mock.patch.object(helper_threads, 'start', autospec=True) as start: + consumer.start_consuming() + assert consumer._exiting.is_set() is False + assert consumer.active is True + start.assert_called_once_with( + 'consume bidirectional stream', + consumer._request_queue, + consumer._blocking_consume, + ) From d467719e732adbc3f82caa4df4855f402abc9929 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Wed, 26 Jul 2017 08:54:21 -0700 Subject: [PATCH 24/63] Fix minor linting error. --- pubsub/nox.py | 12 ++++++------ .../tests/unit/pubsub_v1/subscriber/test_consumer.py | 1 - 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/pubsub/nox.py b/pubsub/nox.py index 209ed41f9bfc..0950c73db088 100644 --- a/pubsub/nox.py +++ b/pubsub/nox.py @@ -35,10 +35,9 @@ def unit_tests(session, python_version): session.install('-e', '.') # Run py.test against the unit tests. 
- session.run('py.test', '--quiet', - '--cov=google.cloud.pubsub', '--cov=tests.unit', '--cov-append', - '--cov-config=.coveragerc', '--cov-report=', '--cov-fail-under=97', - 'tests/unit', + session.run('py.test', '--quiet', '--cov-append', '--cov-report=', + '--cov=google.cloud.pubsub', '--cov=google.cloud.pubsub_v1', + '--cov=tests.unit', '--cov-config=.coveragerc', 'tests/unit' ) @@ -95,5 +94,6 @@ def cover(session): """ session.interpreter = 'python3.6' session.install('coverage', 'pytest-cov') - session.run('coverage', 'report', '--show-missing', '--fail-under=100') - session.run('coverage', 'erase') + session.run('coverage', 'html', '--fail-under=0') + # session.run('coverage', 'report', '--show-missing', '--fail-under=100') + session.run('coverage', 'erase', success_codes=(0, 1)) diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py index 24d60a627989..3ab7b21e86c1 100644 --- a/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py @@ -92,7 +92,6 @@ def test_blocking_consume_keyboard_interrupt(): @mock.patch.object(thread.Policy, 'on_exception', autospec=True) def test_blocking_consume_exception_reraise(on_exc, on_res, call_rpc): consumer = create_consumer() - Policy = type(consumer._policy) # Establish that we get responses until we are sent the exiting event. 
call_rpc.return_value = (mock.sentinel.A, mock.sentinel.B) From c821d33944908d73f07d73e26cf6ab615d7a4af0 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Wed, 26 Jul 2017 09:14:40 -0700 Subject: [PATCH 25/63] Histogram tests --- .../cloud/pubsub_v1/subscriber/histogram.py | 10 +-- .../pubsub_v1/subscriber/test_histogram.py | 84 +++++++++++++++++++ 2 files changed, 89 insertions(+), 5 deletions(-) create mode 100644 pubsub/tests/unit/pubsub_v1/subscriber/test_histogram.py diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/histogram.py b/pubsub/google/cloud/pubsub_v1/subscriber/histogram.py index 3f8b64ed9f73..0e3d74d68b25 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/histogram.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/histogram.py @@ -91,7 +91,7 @@ def max(self): """ if len(self._data) == 0: return 600 - return next(iter(reversed(list(self._data.keys())))) + return next(iter(reversed(sorted(self._data.keys())))) @property def min(self): @@ -104,7 +104,7 @@ def min(self): """ if len(self._data) == 0: return 10 - return next(iter(self._data.keys())) + return next(iter(sorted(self._data.keys()))) def add(self, value): """Add the value to this histogram. @@ -143,11 +143,11 @@ def percentile(self, percent): target = len(self) - len(self) * (percent / 100) # Iterate over the values in reverse, dropping the target by the - # number of times each value has been seen. When the target reaches + # number of times each value has been seen. When the target passes # 0, return the value we are currently viewing. - for k in reversed(list(self._data.keys())): + for k in reversed(sorted(self._data.keys())): target -= self._data[k] - if target <= 0: + if target < 0: return k # The only way to get here is if there was no data. 
diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_histogram.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_histogram.py new file mode 100644 index 000000000000..d3e5e02a92c0 --- /dev/null +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_histogram.py @@ -0,0 +1,84 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from google.cloud.pubsub_v1.subscriber import histogram + + +def test_init(): + data = {} + histo = histogram.Histogram(data=data) + assert histo._data is data + assert len(histo) == 0 + + +def test_contains(): + histo = histogram.Histogram() + histo.add(10) + histo.add(20) + assert 10 in histo + assert 20 in histo + assert 30 not in histo + + +def test_max(): + histo = histogram.Histogram() + assert histo.max == 600 + histo.add(120) + assert histo.max == 120 + histo.add(150) + assert histo.max == 150 + histo.add(20) + assert histo.max == 150 + + +def test_min(): + histo = histogram.Histogram() + assert histo.min == 10 + histo.add(60) + assert histo.min == 60 + histo.add(30) + assert histo.min == 30 + histo.add(120) + assert histo.min == 30 + + +def test_add(): + histo = histogram.Histogram() + histo.add(60) + assert histo._data[60] == 1 + histo.add(60) + assert histo._data[60] == 2 + + +def test_add_lower_limit(): + histo = histogram.Histogram() + histo.add(5) + assert 5 not in histo + assert 10 in histo + + +def test_add_upper_limit(): + histo = histogram.Histogram() + histo.add(12000) + assert 
12000 not in histo + assert 600 in histo + + +def test_percentile(): + histo = histogram.Histogram() + [histo.add(i) for i in range(101, 201)] + assert histo.percentile(100) == 200 + assert histo.percentile(101) == 200 + assert histo.percentile(99) == 199 + assert histo.percentile(1) == 101 From ed750b2d69359b38c4e82105e5dace41a13154a7 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Wed, 26 Jul 2017 09:50:34 -0700 Subject: [PATCH 26/63] Minor fix based on Max feedback. --- pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py index 77f24f57f92d..d03c85cb8f55 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py @@ -134,9 +134,7 @@ def call_rpc(self, request_generator): and blocks if there are no outstanding requests (until such time as there are). """ - return self._client.api.streaming_pull(request_generator, - options=gax.CallOptions(timeout=600), - ) + return self._client.api.streaming_pull(request_generator) def drop(self, ack_id): """Remove the given ack ID from lease management. From 216310c7922c198b61741788981889e6cc6cfeae Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Wed, 26 Jul 2017 10:21:32 -0700 Subject: [PATCH 27/63] starting on helper thread tests --- .../subscriber/test_helper_threads.py | 29 +++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 pubsub/tests/unit/pubsub_v1/subscriber/test_helper_threads.py diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_helper_threads.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_helper_threads.py new file mode 100644 index 000000000000..216c3c8dce89 --- /dev/null +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_helper_threads.py @@ -0,0 +1,29 @@ +# Copyright 2017, Google Inc. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import queue +import threading + +import mock + +from google.cloud.pubsub_v1.subscriber import helper_threads + + +def test_start(): + registry = helper_threads.HelperThreadRegistry() + queue_ = queue.Queue() + target = mock.Mock(spec=()) + with mock.patch.object(threading.Thread, 'start', autospec=True) as start: + registry.start('foo', queue_, target) + assert start.called From a1fd28782749bab61336d8582ece94461e081589 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Thu, 27 Jul 2017 08:38:10 -0700 Subject: [PATCH 28/63] Add tests for helper_threads. --- .../pubsub_v1/subscriber/helper_threads.py | 2 +- .../subscriber/test_helper_threads.py | 82 +++++++++++++++++++ 2 files changed, 83 insertions(+), 1 deletion(-) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/helper_threads.py b/pubsub/google/cloud/pubsub_v1/subscriber/helper_threads.py index 07ada2a0def3..6fc775cae634 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/helper_threads.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/helper_threads.py @@ -80,7 +80,7 @@ def stop(self, name): """ # Attempt to retrieve the thread; if it is gone already, no-op. helper_thread = self._helper_threads.get(name) - if helper_thread.thread is None: + if helper_thread is None: return # Join the thread if it is still alive. 
diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_helper_threads.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_helper_threads.py index 216c3c8dce89..a5f2c868f994 100644 --- a/pubsub/tests/unit/pubsub_v1/subscriber/test_helper_threads.py +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_helper_threads.py @@ -27,3 +27,85 @@ def test_start(): with mock.patch.object(threading.Thread, 'start', autospec=True) as start: registry.start('foo', queue_, target) assert start.called + + +def test_stop_noop(): + registry = helper_threads.HelperThreadRegistry() + assert len(registry._helper_threads) == 0 + registry.stop('foo') + assert len(registry._helper_threads) == 0 + + +def test_stop_dead_thread(): + registry = helper_threads.HelperThreadRegistry() + registry._helper_threads['foo'] = helper_threads._HelperThread( + name='foo', + queue=None, + thread=threading.Thread(target=lambda: None), + ) + assert len(registry._helper_threads) == 1 + registry.stop('foo') + assert len(registry._helper_threads) == 0 + + +@mock.patch.object(queue.Queue, 'put') +@mock.patch.object(threading.Thread, 'is_alive') +@mock.patch.object(threading.Thread, 'join') +def test_stop_alive_thread(join, is_alive, put): + is_alive.return_value = True + + # Set up a registry with a helper thread in it. + registry = helper_threads.HelperThreadRegistry() + registry._helper_threads['foo'] = helper_threads._HelperThread( + name='foo', + queue=queue.Queue(), + thread=threading.Thread(target=lambda: None), + ) + + # Assert that the helper thread is present, and removed correctly + # on stop. + assert len(registry._helper_threads) == 1 + registry.stop('foo') + assert len(registry._helper_threads) == 0 + + # Assert that all of our mocks were called in the expected manner. 
+ is_alive.assert_called_once_with() + join.assert_called_once_with() + put.assert_called_once_with(helper_threads.STOP) + + +def test_stop_all(): + registry = helper_threads.HelperThreadRegistry() + registry._helper_threads['foo'] = helper_threads._HelperThread( + name='foo', + queue=None, + thread=threading.Thread(target=lambda: None), + ) + assert len(registry._helper_threads) == 1 + registry.stop_all() + assert len(registry._helper_threads) == 0 + + +def test_stop_all_noop(): + registry = helper_threads.HelperThreadRegistry() + assert len(registry._helper_threads) == 0 + registry.stop_all() + assert len(registry._helper_threads) == 0 + + +def test_queue_callback_thread(): + queue_ = queue.Queue() + callback = mock.Mock(spec=()) + qct = helper_threads.QueueCallbackThread(queue_, callback) + + # Set up an appropriate mock for the queue, and call the queue callback + # thread. + with mock.patch.object(queue.Queue, 'get') as get: + get.side_effect = (mock.sentinel.A, helper_threads.STOP) + qct() + + # Assert that we got the expected calls. + assert get.call_count == 2 + assert get.mock_calls[0][1][0] == mock.sentinel.A + assert get.mock_calls[1][1][0] == helper_threads.STOP + callback.assert_called_once_with(mock.sentinel.A) From 32701e16d42649f24e88957e58fe6c3ff63e3e8e Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Thu, 27 Jul 2017 10:38:52 -0700 Subject: [PATCH 29/63] Almost done with unit tests. 
--- .../pubsub_v1/subscriber/helper_threads.py | 3 + pubsub/google/cloud/pubsub_v1/types.py | 8 +- pubsub/nox.py | 2 +- .../subscriber/test_helper_threads.py | 2 - .../unit/pubsub_v1/subscriber/test_message.py | 92 +++++++++++++++ .../subscriber/test_policy_thread.py | 108 ++++++++++++++++++ pubsub/tests/unit/test_pubsub.py | 22 ++++ 7 files changed, 233 insertions(+), 4 deletions(-) create mode 100644 pubsub/tests/unit/pubsub_v1/subscriber/test_message.py create mode 100644 pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py create mode 100644 pubsub/tests/unit/test_pubsub.py diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/helper_threads.py b/pubsub/google/cloud/pubsub_v1/subscriber/helper_threads.py index 6fc775cae634..42bfab4b4a51 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/helper_threads.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/helper_threads.py @@ -43,6 +43,9 @@ class HelperThreadRegistry(object): def __init__(self): self._helper_threads = {} + def __contains__(self, needle): + return needle in self._helper_threads + def start(self, name, queue, target, *args, **kwargs): """Create and start a helper thread. diff --git a/pubsub/google/cloud/pubsub_v1/types.py b/pubsub/google/cloud/pubsub_v1/types.py index f7ab43d1ea4e..f770da096a39 100644 --- a/pubsub/google/cloud/pubsub_v1/types.py +++ b/pubsub/google/cloud/pubsub_v1/types.py @@ -21,6 +21,7 @@ from google.cloud.proto.pubsub.v1 import pubsub_pb2 from google.gax.utils.messages import get_messages +from google.protobuf import timestamp_pb2 # Define the default values for batching. @@ -51,7 +52,12 @@ ) -names = ['BatchSettings', 'FlowControl'] +# Pub/Sub uses timestamps from the common protobuf package. +# Do not make users import from there. 
+Timestamp = timestamp_pb2.Timestamp + + +names = ['BatchSettings', 'FlowControl', 'Timestamp'] for name, message in get_messages(pubsub_pb2).items(): setattr(sys.modules[__name__], name, message) names.append(name) diff --git a/pubsub/nox.py b/pubsub/nox.py index 0950c73db088..cc06d5bf1b2a 100644 --- a/pubsub/nox.py +++ b/pubsub/nox.py @@ -37,7 +37,7 @@ def unit_tests(session, python_version): # Run py.test against the unit tests. session.run('py.test', '--quiet', '--cov-append', '--cov-report=', '--cov=google.cloud.pubsub', '--cov=google.cloud.pubsub_v1', - '--cov=tests.unit', '--cov-config=.coveragerc', 'tests/unit' + '--cov-config=.coveragerc', 'tests/unit' ) diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_helper_threads.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_helper_threads.py index a5f2c868f994..f1234eefc4b2 100644 --- a/pubsub/tests/unit/pubsub_v1/subscriber/test_helper_threads.py +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_helper_threads.py @@ -106,6 +106,4 @@ def test_queue_callback_thread(): # Assert that we got the expected calls. assert get.call_count == 2 - assert get.mock_calls[0][1][0] == mock.sentinel.A - assert get.mock_calls[1][1][0] == helper_threads.STOP callback.assert_called_once_with(mock.sentinel.A) diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_message.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_message.py new file mode 100644 index 000000000000..dc9d4de055de --- /dev/null +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_message.py @@ -0,0 +1,92 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import time + +import mock + +from google.cloud.pubsub_v1 import subscriber +from google.cloud.pubsub_v1 import types +from google.cloud.pubsub_v1.subscriber import message +from google.cloud.pubsub_v1.subscriber.policy import thread + + +def create_message(data, ack_id='ACKID', **attrs): + client = subscriber.Client() + policy = thread.Policy(client, 'sub_name') + with mock.patch.object(message.Message, 'lease') as lease: + with mock.patch.object(time, 'time') as time_: + time_.return_value = 1335020400 + msg = message.Message(policy, ack_id, types.PubsubMessage( + attributes=attrs, + data=data, + message_id='message_id', + publish_time=types.Timestamp(seconds=1335020400 - 86400), + )) + lease.assert_called_once_with() + return msg + + +def test_attributes(): + msg = create_message(b'foo', baz='bacon', spam='eggs') + assert msg.attributes == {'baz': 'bacon', 'spam': 'eggs'} + + +def test_data(): + msg = create_message(b'foo') + assert msg.data == b'foo' + + +def test_publish_time(): + msg = create_message(b'foo') + assert msg.publish_time == types.Timestamp(seconds=1335020400 - 86400) + + +def test_ack(): + msg = create_message(b'foo', ack_id='bogus_ack_id') + with mock.patch.object(thread.Policy, 'ack') as ack: + with mock.patch.object(message.Message, 'drop') as drop: + msg.ack() + ack.assert_called_once_with('bogus_ack_id') + drop.assert_called_once_with() + + +def test_drop(): + msg = create_message(b'foo', ack_id='bogus_ack_id') + with mock.patch.object(thread.Policy, 'drop') as drop: + msg.drop() + drop.assert_called_once_with('bogus_ack_id') + + 
+def test_lease(): + msg = create_message(b'foo', ack_id='bogus_ack_id') + with mock.patch.object(thread.Policy, 'lease') as lease: + msg.lease() + lease.assert_called_once_with('bogus_ack_id') + + +def test_modify_ack_deadline(): + msg = create_message(b'foo', ack_id='bogus_ack_id') + with mock.patch.object(thread.Policy, 'modify_ack_deadline') as mad: + msg.modify_ack_deadline(60) + mad.assert_called_once_with('bogus_ack_id', 60) + + +def test_nack(): + msg = create_message(b'foo') + with mock.patch.object(message.Message, 'modify_ack_deadline') as mad: + with mock.patch.object(message.Message, 'drop') as drop: + msg.nack() + mad.assert_called_once_with(seconds=0) + drop.assert_called_once_with() diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py new file mode 100644 index 000000000000..11a57893002f --- /dev/null +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py @@ -0,0 +1,108 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import threading + +import grpc + +import mock + +import pytest + +from google.cloud.pubsub_v1 import subscriber +from google.cloud.pubsub_v1 import types +from google.cloud.pubsub_v1.subscriber import helper_threads +from google.cloud.pubsub_v1.subscriber import message +from google.cloud.pubsub_v1.subscriber.policy import thread + + +def create_policy(): + client = subscriber.Client() + return thread.Policy(client, 'sub_name') + + +def test_init(): + policy = create_policy() + policy._callback(None) + + +def test_close(): + policy = create_policy() + consumer = policy._consumer + with mock.patch.object(consumer, 'stop_consuming') as stop_consuming: + policy.close() + stop_consuming.assert_called_once_with() + assert 'callback request worker' not in policy._consumer.helper_threads + + +@mock.patch.object(helper_threads.HelperThreadRegistry, 'start') +@mock.patch.object(threading.Thread, 'start') +def test_open(thread_start, htr_start): + policy = create_policy() + with mock.patch.object(policy._consumer, 'start_consuming') as consuming: + policy.open(mock.sentinel.CALLBACK) + assert policy._callback is mock.sentinel.CALLBACK + consuming.assert_called_once_with() + htr_start.assert_called() + thread_start.assert_called() + + +def test_on_callback_request(): + policy = create_policy() + with mock.patch.object(policy, 'call_rpc') as call_rpc: + policy.on_callback_request(('call_rpc', 'something', 42)) + call_rpc.assert_called_once_with('something', 42) + + +def test_on_exception_deadline_exceeded(): + policy = create_policy() + exc = mock.Mock(spec=('code',)) + exc.code.return_value = grpc.StatusCode.DEADLINE_EXCEEDED + assert policy.on_exception(exc) is None + + +def test_on_exception_other(): + policy = create_policy() + exc = TypeError('wahhhhhh') + with pytest.raises(TypeError): + policy.on_exception(exc) + + +def test_on_response(): + callback = mock.Mock(spec=()) + + # Set up the policy. 
+ policy = create_policy() + policy._callback = callback + + # Set up the messages to send. + messages = ( + types.PubsubMessage(data=b'foo', message_id='1'), + types.PubsubMessage(data=b'bar', message_id='2'), + ) + + # Set up a valid response. + response = types.StreamingPullResponse( + received_messages=[ + {'ack_id': 'fack', 'message': messages[0]}, + {'ack_id': 'back', 'message': messages[1]}, + ], + ) + + # Actually run the method and prove that the callback was + # called in the expected way. + policy.on_response(response) + assert callback.call_count == 2 + for call in callback.mock_calls: + assert isinstance(call[1][0], message.Message) diff --git a/pubsub/tests/unit/test_pubsub.py b/pubsub/tests/unit/test_pubsub.py new file mode 100644 index 000000000000..605dbddd7601 --- /dev/null +++ b/pubsub/tests/unit/test_pubsub.py @@ -0,0 +1,22 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from google.cloud import pubsub +from google.cloud import pubsub_v1 + + +def test_exported_things(): + assert pubsub.PublisherClient is pubsub_v1.PublisherClient + assert pubsub.SubscriberClient is pubsub_v1.SubscriberClient + assert pubsub.types is pubsub_v1.types From 34272addf41ad202032b16d2dda3a7193d4597e0 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Thu, 27 Jul 2017 14:06:29 -0700 Subject: [PATCH 30/63] Full coverage. 
--- .../cloud/pubsub_v1/subscriber/policy/base.py | 10 +- .../pubsub_v1/subscriber/policy/thread.py | 1 - pubsub/nox.py | 5 +- .../pubsub_v1/subscriber/test_policy_base.py | 139 ++++++++++++++++++ 4 files changed, 147 insertions(+), 8 deletions(-) create mode 100644 pubsub/tests/unit/pubsub_v1/subscriber/test_policy_base.py diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py index d03c85cb8f55..b52d03ba6078 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py @@ -21,8 +21,6 @@ import six -from google import gax - from google.cloud.pubsub_v1 import types from google.cloud.pubsub_v1.subscriber import consumer from google.cloud.pubsub_v1.subscriber import histogram @@ -81,7 +79,11 @@ def ack_deadline(self): Returns: int: The correct ack deadline. """ - if len(self.histogram) > self._last_histogram_size * 2: + target = min([ + self._last_histogram_size * 2, + self._last_histogram_size + 100, + ]) + if len(self.histogram) > target: self._ack_deadline = self.histogram.percentile(percent=99) return self._ack_deadline @@ -220,7 +222,7 @@ def nack(self, ack_id): Args: ack_id (str): The ack ID. """ - return self.modify_ack_deadline(ack_id, 0) + return self.modify_ack_deadline(ack_id=ack_id, seconds=0) @abc.abstractmethod def on_response(self, response): diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py index acb318006625..0b0603ee7eac 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py @@ -40,7 +40,6 @@ def __init__(self, client, subscription): self._callback = lambda message: None # Create a manager for keeping track of shared state. - self._managed_ack_ids = set() self._request_queue = queue.Queue() # Call the superclass constructor. 
diff --git a/pubsub/nox.py b/pubsub/nox.py index cc06d5bf1b2a..6931878c54e1 100644 --- a/pubsub/nox.py +++ b/pubsub/nox.py @@ -94,6 +94,5 @@ def cover(session): """ session.interpreter = 'python3.6' session.install('coverage', 'pytest-cov') - session.run('coverage', 'html', '--fail-under=0') - # session.run('coverage', 'report', '--show-missing', '--fail-under=100') - session.run('coverage', 'erase', success_codes=(0, 1)) + session.run('coverage', 'report', '--show-missing', '--fail-under=100') + session.run('coverage', 'erase') diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_base.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_base.py new file mode 100644 index 000000000000..3c133be5d809 --- /dev/null +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_base.py @@ -0,0 +1,139 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import time + +import mock + +from google.cloud.pubsub_v1 import subscriber +from google.cloud.pubsub_v1 import types +from google.cloud.pubsub_v1.subscriber.policy import thread + + +def create_policy(): + client = subscriber.Client() + return thread.Policy(client, 'sub_name') + + +def test_ack_deadline(): + policy = create_policy() + assert policy.ack_deadline == 10 + policy.histogram.add(20) + assert policy.ack_deadline == 20 + policy.histogram.add(10) + assert policy.ack_deadline == 20 + + +def test_initial_request(): + policy = create_policy() + initial_request = policy.initial_request + assert isinstance(initial_request, types.StreamingPullRequest) + assert initial_request.subscription == 'sub_name' + assert initial_request.stream_ack_deadline_seconds == 10 + + +def test_managed_ack_ids(): + policy = create_policy() + + # Ensure we always get a set back, even if the property is not yet set. + managed_ack_ids = policy.managed_ack_ids + assert isinstance(managed_ack_ids, set) + + # Ensure that multiple calls give the same actual object back. 
+ assert managed_ack_ids is policy.managed_ack_ids + + +def test_subscription(): + policy = create_policy() + assert policy.subscription == 'sub_name' + + +def test_ack(): + policy = create_policy() + with mock.patch.object(policy._consumer, 'send_request') as send_request: + policy.ack('ack_id_string') + send_request.assert_called_once_with(types.StreamingPullRequest( + ack_ids=['ack_id_string'], + )) + + +def test_call_rpc(): + policy = create_policy() + with mock.patch.object(policy._client.api, 'streaming_pull') as pull: + policy.call_rpc(mock.sentinel.GENERATOR) + pull.assert_called_once_with(mock.sentinel.GENERATOR) + + +def test_drop(): + policy = create_policy() + policy.managed_ack_ids.add('ack_id_string') + policy.drop('ack_id_string') + assert len(policy.managed_ack_ids) == 0 + + +def test_modify_ack_deadline(): + policy = create_policy() + with mock.patch.object(policy._consumer, 'send_request') as send_request: + policy.modify_ack_deadline('ack_id_string', 60) + send_request.assert_called_once_with(types.StreamingPullRequest( + modify_deadline_ack_ids=['ack_id_string'], + modify_deadline_seconds=[60], + )) + + +def test_maintain_leases_inactive_consumer(): + policy = create_policy() + policy._consumer.active = False + assert policy.maintain_leases() is None + + +def test_maintain_leases_ack_ids(): + policy = create_policy() + policy._consumer.active = True + policy.lease('my ack id') + + # Mock the sleep object. + with mock.patch.object(time, 'sleep', autospec=True) as sleep: + def trigger_inactive(seconds): + assert 0 < seconds < 10 + policy._consumer.active = False + sleep.side_effect = trigger_inactive + + # Also mock the consumer, which sends the request. 
+ with mock.patch.object(policy._consumer, 'send_request') as send: + policy.maintain_leases() + send.assert_called_once_with(types.StreamingPullRequest( + modify_deadline_ack_ids=['my ack id'], + modify_deadline_seconds=[10], + )) + sleep.assert_called() + + +def test_maintain_leases_no_ack_ids(): + policy = create_policy() + policy._consumer.active = True + with mock.patch.object(time, 'sleep', autospec=True) as sleep: + def trigger_inactive(seconds): + assert 0 < seconds < 10 + policy._consumer.active = False + sleep.side_effect = trigger_inactive + policy.maintain_leases() + sleep.assert_called() + + +def test_nack(): + policy = create_policy() + with mock.patch.object(policy, 'modify_ack_deadline') as mad: + policy.nack('ack_id_string') + mad.assert_called_once_with(ack_id='ack_id_string', seconds=0) From e1c7c84c01f23aa88a777fa5b11f91ac50d5dece Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Mon, 31 Jul 2017 12:08:40 -0700 Subject: [PATCH 31/63] Do not send policy across the concurrency boundary. --- .../cloud/pubsub_v1/subscriber/message.py | 23 ++++++++-------- .../cloud/pubsub_v1/subscriber/policy/base.py | 7 ++++- .../pubsub_v1/subscriber/policy/thread.py | 4 +-- .../unit/pubsub_v1/subscriber/test_message.py | 27 +++++++++---------- .../pubsub_v1/subscriber/test_policy_base.py | 12 +++++++++ 5 files changed, 43 insertions(+), 30 deletions(-) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/message.py b/pubsub/google/cloud/pubsub_v1/subscriber/message.py index e2bf9415f9a8..83797a248fc0 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/message.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/message.py @@ -42,7 +42,7 @@ class Message(object): publish_time (datetime): The time that this message was originally published. """ - def __init__(self, policy, ack_id, message): + def __init__(self, message, ack_id, request_queue): """Construct the Message. .. 
note:: @@ -51,16 +51,16 @@ def __init__(self, policy, ack_id, message): responsibility of :class:`BasePolicy` subclasses to do so. Args: - policy (~.pubsub_v1.subscriber.policy.BasePolicy): The policy - that created this message, and understands how to handle - actions from that message (e.g. acks). - ack_id (str): The ack_id received from Pub/Sub. message (~.pubsub_v1.types.PubsubMessage): The message received from Pub/Sub. + ack_id (str): The ack_id received from Pub/Sub. + request_queue (queue.Queue): A queue provided by the policy that + can accept requests; the policy is responsible for handling + those requests. """ - self._policy = policy - self._ack_id = ack_id self._message = message + self._ack_id = ack_id + self._request_queue = request_queue self.message_id = message.message_id # The instantiation time is the time that this message @@ -127,8 +127,7 @@ def ack(self): receive any given message more than once. """ time_to_ack = math.ceil(time.time() - self._received_timestamp) - self._policy.histogram.add(time_to_ack) - self._policy.ack(self._ack_id) + self._request_queue.put(('ack', self._ack_id, time_to_ack)) self.drop() def drop(self): @@ -144,7 +143,7 @@ def drop(self): both call this one. You probably do not want to call this method directly. """ - self._policy.drop(self._ack_id) + self._request_queue.put(('drop', self._ack_id)) def lease(self): """Inform the policy to lease this message continually. @@ -153,7 +152,7 @@ def lease(self): This method is called by the constructor, and you should never need to call it manually. """ - self._policy.lease(self._ack_id) + self._request_queue.put(('lease', self._ack_id)) def modify_ack_deadline(self, seconds): """Set the deadline for acknowledgement to the given value. @@ -173,7 +172,7 @@ def modify_ack_deadline(self, seconds): to. This should be between 0 and 600. Due to network latency, values below 10 are advised against. 
""" - self._policy.modify_ack_deadline(self._ack_id, seconds) + self._request_queue.put(('modify_ack_deadline', self._ack_id, seconds)) def nack(self): """Decline to acknowldge the given message. diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py index b52d03ba6078..98f4789c181e 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py @@ -119,12 +119,17 @@ def subscription(self): """ return self._subscription - def ack(self, ack_id): + def ack(self, ack_id, time_to_ack=None): """Acknowledge the message corresponding to the given ack_id. Args: ack_id (str): The ack ID. + time_to_ack (int): The time it took to ack the message, measured + from when it was received from the subscription. This is used + to improve the automatic ack timing. """ + if time_to_ack is not None: + self.histogram.add(int(time_to_ack)) request = types.StreamingPullRequest(ack_ids=[ack_id]) self._consumer.send_request(request) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py index 0b0603ee7eac..e0371740c9db 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py @@ -39,7 +39,7 @@ def __init__(self, client, subscription): # Default the callback to a no-op; it is provided by `.open`. self._callback = lambda message: None - # Create a manager for keeping track of shared state. + # Create a queue for keeping track of shared state. self._request_queue = queue.Queue() # Call the superclass constructor. 
@@ -113,5 +113,5 @@ def on_response(self, response): """ for msg in response.received_messages: logger.debug('New message received from Pub/Sub: %r', msg) - message = Message(self, msg.ack_id, msg.message) + message = Message(msg.message, msg.ack_id, self._request_queue) self._executor.submit(self._callback, message) diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_message.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_message.py index dc9d4de055de..ca132a567ef5 100644 --- a/pubsub/tests/unit/pubsub_v1/subscriber/test_message.py +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_message.py @@ -12,28 +12,25 @@ # See the License for the specific language governing permissions and # limitations under the License. +import queue import time import mock -from google.cloud.pubsub_v1 import subscriber from google.cloud.pubsub_v1 import types from google.cloud.pubsub_v1.subscriber import message -from google.cloud.pubsub_v1.subscriber.policy import thread def create_message(data, ack_id='ACKID', **attrs): - client = subscriber.Client() - policy = thread.Policy(client, 'sub_name') with mock.patch.object(message.Message, 'lease') as lease: with mock.patch.object(time, 'time') as time_: time_.return_value = 1335020400 - msg = message.Message(policy, ack_id, types.PubsubMessage( + msg = message.Message(types.PubsubMessage( attributes=attrs, data=data, message_id='message_id', publish_time=types.Timestamp(seconds=1335020400 - 86400), - )) + ), ack_id, queue.Queue()) lease.assert_called_once_with() return msg @@ -55,32 +52,32 @@ def test_publish_time(): def test_ack(): msg = create_message(b'foo', ack_id='bogus_ack_id') - with mock.patch.object(thread.Policy, 'ack') as ack: + with mock.patch.object(msg._request_queue, 'put') as put: with mock.patch.object(message.Message, 'drop') as drop: msg.ack() - ack.assert_called_once_with('bogus_ack_id') + put.assert_called_once_with(('ack', 'bogus_ack_id', mock.ANY)) drop.assert_called_once_with() def test_drop(): msg = 
create_message(b'foo', ack_id='bogus_ack_id') - with mock.patch.object(thread.Policy, 'drop') as drop: + with mock.patch.object(msg._request_queue, 'put') as put: msg.drop() - drop.assert_called_once_with('bogus_ack_id') + put.assert_called_once_with(('drop', 'bogus_ack_id')) def test_lease(): msg = create_message(b'foo', ack_id='bogus_ack_id') - with mock.patch.object(thread.Policy, 'lease') as lease: + with mock.patch.object(msg._request_queue, 'put') as put: msg.lease() - lease.assert_called_once_with('bogus_ack_id') + put.assert_called_once_with(('lease', 'bogus_ack_id')) def test_modify_ack_deadline(): - msg = create_message(b'foo', ack_id='bogus_ack_id') - with mock.patch.object(thread.Policy, 'modify_ack_deadline') as mad: + msg = create_message(b'foo', ack_id='bogus_id') + with mock.patch.object(msg._request_queue, 'put') as put: msg.modify_ack_deadline(60) - mad.assert_called_once_with('bogus_ack_id', 60) + put.assert_called_once_with(('modify_ack_deadline', 'bogus_id', 60)) def test_nack(): diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_base.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_base.py index 3c133be5d809..563159fa4bed 100644 --- a/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_base.py +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_base.py @@ -60,12 +60,24 @@ def test_subscription(): def test_ack(): + policy = create_policy() + with mock.patch.object(policy._consumer, 'send_request') as send_request: + policy.ack('ack_id_string', 20) + send_request.assert_called_once_with(types.StreamingPullRequest( + ack_ids=['ack_id_string'], + )) + assert len(policy.histogram) == 1 + assert 20 in policy.histogram + + +def test_ack_no_time(): policy = create_policy() with mock.patch.object(policy._consumer, 'send_request') as send_request: policy.ack('ack_id_string') send_request.assert_called_once_with(types.StreamingPullRequest( ack_ids=['ack_id_string'], )) + assert len(policy.histogram) == 0 def test_call_rpc(): From 
2b21f48edbce3db6260854dfc6b9434dc9c952e4 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Mon, 31 Jul 2017 13:53:43 -0700 Subject: [PATCH 32/63] Shift flow control to the policy class. --- .../cloud/pubsub_v1/subscriber/client.py | 12 +++++------ .../cloud/pubsub_v1/subscriber/policy/base.py | 6 +++++- .../pubsub_v1/subscriber/policy/thread.py | 20 +++++++++++++++++-- 3 files changed, 28 insertions(+), 10 deletions(-) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/client.py b/pubsub/google/cloud/pubsub_v1/subscriber/client.py index 58fa66881ba7..afb9f7d7ca75 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/client.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/client.py @@ -19,6 +19,7 @@ from google.cloud.gapic.pubsub.v1 import subscriber_client from google.cloud.pubsub_v1 import _gapic +from google.cloud.pubsub_v1 import types from google.cloud.pubsub_v1.subscriber.policy import thread @@ -35,9 +36,6 @@ class Client(object): get sensible defaults. Args: - flow_control (~.pubsub_v1.types.FlowControl): The flow control - settings. Use this to prevent situations where you are - inundated with too many messages at once. policy_class (class): A class that describes how to handle subscriptions. You may subclass the :class:`.pubsub_v1.subscriber.policy.base.BasePolicy` @@ -50,8 +48,7 @@ class in order to define your own consumer. This is primarily Generally, you should not need to set additional keyword arguments. """ - def __init__(self, flow_control=(), policy_class=thread.Policy, - **kwargs): + def __init__(self, policy_class=thread.Policy, **kwargs): # Add the metrics headers, and instantiate the underlying GAPIC # client. kwargs['lib_name'] = 'gccl' @@ -62,7 +59,7 @@ def __init__(self, flow_control=(), policy_class=thread.Policy, # messages. 
self._policy_class = policy_class - def subscribe(self, subscription, callback=None): + def subscribe(self, subscription, callback=None, flow_control=()): """Return a representation of an individual subscription. This method creates and returns a ``Consumer`` object (that is, a @@ -94,7 +91,8 @@ def subscribe(self, subscription, callback=None): ~.pubsub_v1.subscriber.consumer.base.BaseConsumer: An instance of the defined ``consumer_class`` on the client. """ - subscr = self._policy_class(self, subscription) + flow_control = types.FlowControl(*flow_control) + subscr = self._policy_class(self, subscription, flow_control) if callable(callback): subscr.open(callback) return subscr diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py index 98f4789c181e..68dc2941e371 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py @@ -41,7 +41,8 @@ class BasePolicy(object): subclasses may be passed as the ``policy_class`` argument to :class:`~.pubsub_v1.client.SubscriberClient`. """ - def __init__(self, client, subscription, histogram_data=None): + def __init__(self, client, subscription, + flow_control=types.FlowControl(), histogram_data=None): """Instantiate the policy. Args: @@ -50,6 +51,8 @@ def __init__(self, client, subscription, histogram_data=None): subscription (str): The name of the subscription. The canonical format for this is ``projects/{project}/subscriptions/{subscription}``. + flow_control (~.pubsub_v1.types.FlowControl): The flow control + settings. histogram_data (dict): Optional: A structure to store the histogram data for predicting appropriate ack times. If set, this should be a dictionary-like object. 
@@ -66,6 +69,7 @@ def __init__(self, client, subscription, histogram_data=None): self._consumer = consumer.Consumer(self) self._ack_deadline = 10 self._last_histogram_size = 0 + self.flow_control = flow_control self.histogram = histogram.Histogram(data=histogram_data) @property diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py index e0371740c9db..ac69b3ec46dc 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py @@ -21,6 +21,7 @@ import grpc +from google.cloud.pubsub_v1 import types from google.cloud.pubsub_v1.subscriber import helper_threads from google.cloud.pubsub_v1.subscriber.policy import base from google.cloud.pubsub_v1.subscriber.message import Message @@ -35,7 +36,18 @@ class Policy(base.BasePolicy): This consumer handles the connection to the Pub/Sub service and all of the concurrency needs. """ - def __init__(self, client, subscription): + def __init__(self, client, subscription, flow_control=types.FlowControl()): + """Instantiate the policy. + + Args: + client (~.pubsub_v1.subscriber.client): The subscriber client used + to create this instance. + subscription (str): The name of the subscription. The canonical + format for this is + ``projects/{project}/subscriptions/{subscription}``. + flow_control (~.pubsub_v1.types.FlowControl): The flow control + settings. + """ # Default the callback to a no-op; it is provided by `.open`. self._callback = lambda message: None @@ -43,7 +55,11 @@ def __init__(self, client, subscription): self._request_queue = queue.Queue() # Call the superclass constructor. - super(Policy, self).__init__(client, subscription) + super(Policy, self).__init__( + client=client, + flow_control=flow_control, + subscription=subscription, + ) # Also maintain a request queue and an executor. 
logger.debug('Creating callback requests thread (not starting).') From 7f4b91c0fac80760ec5092b38ffd04e090f6ff4d Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Mon, 31 Jul 2017 14:33:26 -0700 Subject: [PATCH 33/63] Move the request queue to using keyword arguments. --- .../cloud/pubsub_v1/subscriber/message.py | 25 ++++++++++++++++--- .../cloud/pubsub_v1/subscriber/policy/base.py | 16 +++++++++--- .../pubsub_v1/subscriber/policy/thread.py | 4 +-- .../unit/pubsub_v1/subscriber/test_message.py | 20 ++++++++++++--- .../pubsub_v1/subscriber/test_policy_base.py | 23 +++++++++++++++-- .../subscriber/test_policy_thread.py | 4 +-- 6 files changed, 74 insertions(+), 18 deletions(-) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/message.py b/pubsub/google/cloud/pubsub_v1/subscriber/message.py index 83797a248fc0..0fdf73643f54 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/message.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/message.py @@ -112,6 +112,11 @@ def publish_time(self): """ return self._message.publish_time + @property + def size(self): + """Return the size of the underlying message, in bytes.""" + return self._message.ByteSize() + def ack(self): """Acknowledge the given message. @@ -127,7 +132,10 @@ def ack(self): receive any given message more than once. """ time_to_ack = math.ceil(time.time() - self._received_timestamp) - self._request_queue.put(('ack', self._ack_id, time_to_ack)) + self._request_queue.put(('ack', { + 'ack_id': self._ack_id, + 'time_to_ack': time_to_ack, + })) self.drop() def drop(self): @@ -143,7 +151,10 @@ def drop(self): both call this one. You probably do not want to call this method directly. """ - self._request_queue.put(('drop', self._ack_id)) + self._request_queue.put(('drop', { + 'ack_id': self._ack_id, + 'byte_size': self.size, + })) def lease(self): """Inform the policy to lease this message continually. 
@@ -152,7 +163,10 @@ def lease(self): This method is called by the constructor, and you should never need to call it manually. """ - self._request_queue.put(('lease', self._ack_id)) + self._request_queue.put(('lease', { + 'ack_id': self._ack_id, + 'byte_size': self.size, + })) def modify_ack_deadline(self, seconds): """Set the deadline for acknowledgement to the given value. @@ -172,7 +186,10 @@ def modify_ack_deadline(self, seconds): to. This should be between 0 and 600. Due to network latency, values below 10 are advised against. """ - self._request_queue.put(('modify_ack_deadline', self._ack_id, seconds)) + self._request_queue.put(('modify_ack_deadline', { + 'ack_id': self._ack_id, + 'seconds': seconds, + })) def nack(self): """Decline to acknowldge the given message. diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py index 68dc2941e371..a2701b7b9ebb 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py @@ -69,6 +69,7 @@ def __init__(self, client, subscription, self._consumer = consumer.Consumer(self) self._ack_deadline = 10 self._last_histogram_size = 0 + self._bytes = 0 self.flow_control = flow_control self.histogram = histogram.Histogram(data=histogram_data) @@ -147,21 +148,28 @@ def call_rpc(self, request_generator): """ return self._client.api.streaming_pull(request_generator) - def drop(self, ack_id): + def drop(self, ack_id, byte_size): """Remove the given ack ID from lease management. Args: ack_id (str): The ack ID. + byte_size (int): The size of the PubSub message, in bytes. """ - self.managed_ack_ids.remove(ack_id) + if ack_id in self.managed_ack_ids: + self.managed_ack_ids.remove(ack_id) + self._bytes -= byte_size + self._bytes = min([self._bytes, 0]) - def lease(self, ack_id): + def lease(self, ack_id, byte_size): """Add the given ack ID to lease management. Args: ack_id (str): The ack ID. 
+ byte_size (int): The size of the PubSub message, in bytes. """ - self.managed_ack_ids.add(ack_id) + if ack_id not in self.managed_ack_ids: + self.managed_ack_ids.add(ack_id) + self._bytes += byte_size def maintain_leases(self): """Maintain all of the leases being managed by the policy. diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py index ac69b3ec46dc..1f29f53c92f1 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py @@ -105,8 +105,8 @@ def open(self, callback): def on_callback_request(self, callback_request): """Map the callback request to the appropriate GRPC request.""" - action, args = callback_request[0], callback_request[1:] - getattr(self, action)(*args) + action, kwargs = callback_request[0], callback_request[1] + getattr(self, action)(**kwargs) def on_exception(self, exception): """Bubble the exception. diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_message.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_message.py index ca132a567ef5..391a6db59240 100644 --- a/pubsub/tests/unit/pubsub_v1/subscriber/test_message.py +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_message.py @@ -55,7 +55,10 @@ def test_ack(): with mock.patch.object(msg._request_queue, 'put') as put: with mock.patch.object(message.Message, 'drop') as drop: msg.ack() - put.assert_called_once_with(('ack', 'bogus_ack_id', mock.ANY)) + put.assert_called_once_with(('ack', { + 'ack_id': 'bogus_ack_id', + 'time_to_ack': mock.ANY, + })) drop.assert_called_once_with() @@ -63,21 +66,30 @@ def test_drop(): msg = create_message(b'foo', ack_id='bogus_ack_id') with mock.patch.object(msg._request_queue, 'put') as put: msg.drop() - put.assert_called_once_with(('drop', 'bogus_ack_id')) + put.assert_called_once_with(('drop', { + 'ack_id': 'bogus_ack_id', + 'byte_size': 25, + })) def test_lease(): msg = create_message(b'foo', 
ack_id='bogus_ack_id') with mock.patch.object(msg._request_queue, 'put') as put: msg.lease() - put.assert_called_once_with(('lease', 'bogus_ack_id')) + put.assert_called_once_with(('lease', { + 'ack_id': 'bogus_ack_id', + 'byte_size': 25, + })) def test_modify_ack_deadline(): msg = create_message(b'foo', ack_id='bogus_id') with mock.patch.object(msg._request_queue, 'put') as put: msg.modify_ack_deadline(60) - put.assert_called_once_with(('modify_ack_deadline', 'bogus_id', 60)) + put.assert_called_once_with(('modify_ack_deadline', { + 'ack_id': 'bogus_id', + 'seconds': 60, + })) def test_nack(): diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_base.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_base.py index 563159fa4bed..42f416cb50ba 100644 --- a/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_base.py +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_base.py @@ -90,8 +90,15 @@ def test_call_rpc(): def test_drop(): policy = create_policy() policy.managed_ack_ids.add('ack_id_string') - policy.drop('ack_id_string') + policy._bytes = 20 + policy.drop('ack_id_string', 20) assert len(policy.managed_ack_ids) == 0 + assert policy._bytes == 0 + + # Do this again to establish idempotency. + policy.drop('ack_id_string', 20) + assert len(policy.managed_ack_ids) == 0 + assert policy._bytes == 0 def test_modify_ack_deadline(): @@ -113,7 +120,7 @@ def test_maintain_leases_inactive_consumer(): def test_maintain_leases_ack_ids(): policy = create_policy() policy._consumer.active = True - policy.lease('my ack id') + policy.lease('my ack id', 50) # Mock the sleep object. with mock.patch.object(time, 'sleep', autospec=True) as sleep: @@ -144,6 +151,18 @@ def trigger_inactive(seconds): sleep.assert_called() +def test_lease(): + policy = create_policy() + policy.lease('ack_id_string', 20) + assert len(policy.managed_ack_ids) == 1 + assert policy._bytes == 20 + + # Do this again to prove idempotency. 
+ policy.lease('ack_id_string', 20) + assert len(policy.managed_ack_ids) == 1 + assert policy._bytes == 20 + + def test_nack(): policy = create_policy() with mock.patch.object(policy, 'modify_ack_deadline') as mad: diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py index 11a57893002f..d87848a76d9d 100644 --- a/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py @@ -61,8 +61,8 @@ def test_open(thread_start, htr_start): def test_on_callback_request(): policy = create_policy() with mock.patch.object(policy, 'call_rpc') as call_rpc: - policy.on_callback_request(('call_rpc', 'something', 42)) - call_rpc.assert_called_once_with('something', 42) + policy.on_callback_request(('call_rpc', {'something': 42})) + call_rpc.assert_called_once_with(something=42) def test_on_exception_deadline_exceeded(): From 3852805e8b9115f491cfdc60a3eb9b3f729df8b2 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Tue, 1 Aug 2017 11:15:35 -0700 Subject: [PATCH 34/63] Can has flow control. --- .../cloud/pubsub_v1/subscriber/consumer.py | 5 +- .../cloud/pubsub_v1/subscriber/message.py | 8 +- .../cloud/pubsub_v1/subscriber/policy/base.py | 168 ++++++++++++++---- pubsub/google/cloud/pubsub_v1/types.py | 3 +- .../unit/pubsub_v1/subscriber/test_message.py | 15 +- .../pubsub_v1/subscriber/test_policy_base.py | 71 +++++++- 6 files changed, 217 insertions(+), 53 deletions(-) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py b/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py index a3f237d7fe63..68a9bd386201 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py @@ -200,10 +200,11 @@ def _request_generator_thread(self): """ # First, yield the initial request. This occurs on every new # connection, fundamentally including a resumed connection. 
+ initial_request = self._policy.get_initial_request(ack_queue=True) _LOGGER.debug('Sending initial request: {initial_request}'.format( - initial_request=self._policy.initial_request, + initial_request=initial_request, )) - yield self._policy.initial_request + yield initial_request # Now yield each of the items on the request queue, and block if there # are none. This can and must block to keep the stream open. diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/message.py b/pubsub/google/cloud/pubsub_v1/subscriber/message.py index 0fdf73643f54..bb85823664c0 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/message.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/message.py @@ -134,9 +134,9 @@ def ack(self): time_to_ack = math.ceil(time.time() - self._received_timestamp) self._request_queue.put(('ack', { 'ack_id': self._ack_id, + 'byte_size': self.size, 'time_to_ack': time_to_ack, })) - self.drop() def drop(self): """Release the message from lease management. @@ -196,5 +196,7 @@ def nack(self): This will cause the message to be re-delivered to the subscription. """ - self.modify_ack_deadline(seconds=0) - self.drop() + self._request_queue.put(('nack', { + 'ack_id': self._ack_id, + 'byte_size': self.size, + })) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py index a2701b7b9ebb..dd416886b5d1 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from __future__ import absolute_import +from __future__ import absolute_import, division import abc import logging @@ -69,10 +69,15 @@ def __init__(self, client, subscription, self._consumer = consumer.Consumer(self) self._ack_deadline = 10 self._last_histogram_size = 0 - self._bytes = 0 self.flow_control = flow_control self.histogram = histogram.Histogram(data=histogram_data) + # These are for internal flow control tracking. + # They should not need to be used by subclasses. + self._bytes = 0 + self._ack_on_resume = set() + self._paused = False + @property def ack_deadline(self): """Return the appropriate ack deadline. @@ -92,18 +97,6 @@ def ack_deadline(self): self._ack_deadline = self.histogram.percentile(percent=99) return self._ack_deadline - @property - def initial_request(self): - """Return the initial request. - - This defines the initial request that must always be sent to Pub/Sub - immediately upon opening the subscription. - """ - return types.StreamingPullRequest( - stream_ack_deadline_seconds=self.histogram.percentile(99), - subscription=self.subscription, - ) - @property def managed_ack_ids(self): """Return the ack IDs currently being managed by the policy. @@ -124,7 +117,29 @@ def subscription(self): """ return self._subscription - def ack(self, ack_id, time_to_ack=None): + @property + def _load(self): + """Return the current load. + + The load is represented as a float, where 1.0 represents having + hit one of the flow control limits, and values between 0.0 and 1.0 + represent how close we are to them. (0.5 means we have exactly half + of what the flow control setting allows, for example.) + + There are (currently) two flow control settings; this property + computes how close the subscriber is to each of them, and returns + whichever value is higher. (It does not matter that we have lots of + running room on setting A if setting B is over.) + + Returns: + float: The load value. 
+ """ + return max([ + len(self.managed_ack_ids) / self.flow_control.max_messages, + self._bytes / self.flow_control.max_bytes, + ]) + + def ack(self, ack_id, time_to_ack=None, byte_size=None): """Acknowledge the message corresponding to the given ack_id. Args: @@ -132,11 +147,24 @@ def ack(self, ack_id, time_to_ack=None): time_to_ack (int): The time it took to ack the message, measured from when it was received from the subscription. This is used to improve the automatic ack timing. + byte_size (int): The size of the PubSub message, in bytes. """ + # If we got timing information, add it to the histogram. if time_to_ack is not None: self.histogram.add(int(time_to_ack)) - request = types.StreamingPullRequest(ack_ids=[ack_id]) - self._consumer.send_request(request) + + # Send the request to ack the message. + # However, if the consumer is inactive, then queue the ack_id here + # instead; it will be acked as part of the initial request when the + # consumer is started again. + if self._consumer.active: + request = types.StreamingPullRequest(ack_ids=[ack_id]) + self._consumer.send_request(request) + else: + self._ack_on_resume.add(ack_id) + + # Remove the message from lease management. + self.drop(ack_id=ack_id, byte_size=byte_size) def call_rpc(self, request_generator): """Invoke the Pub/Sub streaming pull RPC. @@ -155,11 +183,70 @@ def drop(self, ack_id, byte_size): ack_id (str): The ack ID. byte_size (int): The size of the PubSub message, in bytes. """ + # Remove the ack ID from lease management, and decrement the + # byte counter. if ack_id in self.managed_ack_ids: self.managed_ack_ids.remove(ack_id) self._bytes -= byte_size self._bytes = min([self._bytes, 0]) + # If we have been paused by flow control, check and see if we are + # back within our limits. + # + # In order to not thrash too much, require us to have passed below + # the resume threshold (80% by default) of each flow control setting + # before restarting. 
+ if self._paused and self._load < self.flow_control.resume_threshold: + self._paused = False + self.open(self._callback) + + def get_initial_request(self, ack_queue=False): + """Return the initial request. + + This defines the initial request that must always be sent to Pub/Sub + immediately upon opening the subscription. + + Args: + ack_queue (bool): Whether to include any acks that were sent + while the connection was paused. + + Returns: + ~.pubsub_v1.types.StreamingPullRequest: A request suitable + for being the first request on the stream (and not suitable + for any other purpose). + + .. note:: + If ``ack_queue`` is set to True, this includes the ack_ids, but + also clears the internal set. + + This means that calls to :meth:`get_initial_request` with + ``ack_queue`` set to True are not idempotent. + """ + # Any ack IDs that are under lease management and not being acked + # need to have their deadline extended immediately. + ack_ids = set() + lease_ids = self.managed_ack_ids + if ack_queue: + ack_ids = self._ack_on_resume + lease_ids = lease_ids.difference(ack_ids) + + # Put the request together. + request = types.StreamingPullRequest( + ack_ids=list(ack_ids), + modify_deadline_ack_ids=list(lease_ids), + modify_deadline_seconds=[self.ack_deadline] * len(lease_ids), + stream_ack_deadline_seconds=self.histogram.percentile(99), + subscription=self.subscription, + ) + + # Clear the ack_ids set. + # Note: If `ack_queue` is False, this just ends up being a no-op, + # since the set is just an empty set. + ack_ids.clear() + + # Return the initial request. + return request + def lease(self, ack_id, byte_size): """Add the given ack ID to lease management. @@ -167,10 +254,18 @@ def lease(self, ack_id, byte_size): ack_id (str): The ack ID. byte_size (int): The size of the PubSub message, in bytes. """ + # Add the ack ID to the set of managed ack IDs, and increment + # the size counter. 
if ack_id not in self.managed_ack_ids: self.managed_ack_ids.add(ack_id) self._bytes += byte_size + # Sanity check: Do we have too many things in our inventory? + # If we do, we need to stop the stream. + if self._load >= 1.0: + self._paused = True + self.close() + def maintain_leases(self): """Maintain all of the leases being managed by the policy. @@ -202,7 +297,7 @@ def maintain_leases(self): # it is more efficient to make a single request. ack_ids = list(self.managed_ack_ids) logger.debug('Renewing lease for %d ack IDs.' % len(ack_ids)) - if len(ack_ids) > 0: + if len(ack_ids) > 0 and self._consumer.active: request = types.StreamingPullRequest( modify_deadline_ack_ids=ack_ids, modify_deadline_seconds=[p99] * len(ack_ids), @@ -233,13 +328,33 @@ def modify_ack_deadline(self, ack_id, seconds): ) self._consumer.send_request(request) - def nack(self, ack_id): + def nack(self, ack_id, byte_size=None): """Explicitly deny receipt of a message. Args: ack_id (str): The ack ID. + byte_size (int): The size of the PubSub message, in bytes. + """ + self.modify_ack_deadline(ack_id=ack_id, seconds=0) + self.drop(ack_id=ack_id, byte_size=byte_size) + + @abc.abstractmethod + def close(self): + """Close the existing connection.""" + raise NotImplementedError + + @abc.abstractmethod + def on_exception(self, exception): + """Called when a gRPC exception occurs. + + If this method does nothing, then the stream is re-started. If this + raises an exception, it will stop the consumer thread. + This is executed on the response consumer helper thread. + + Args: + exception (Exception): The exception raised by the RPC. """ - return self.modify_ack_deadline(ack_id=ack_id, seconds=0) + raise NotImplementedError @abc.abstractmethod def on_response(self, response): @@ -262,19 +377,6 @@ def on_response(self, response): """ raise NotImplementedError - @abc.abstractmethod - def on_exception(self, exception): - """Called when a gRPC exception occurs. 
- - If this method does nothing, then the stream is re-started. If this - raises an exception, it will stop the consumer thread. - This is executed on the response consumer helper thread. - - Args: - exception (Exception): The exception raised by the RPC. - """ - raise NotImplementedError - @abc.abstractmethod def open(self, callback): """Open a streaming pull connection and begin receiving messages. diff --git a/pubsub/google/cloud/pubsub_v1/types.py b/pubsub/google/cloud/pubsub_v1/types.py index f770da096a39..ec92ab38524d 100644 --- a/pubsub/google/cloud/pubsub_v1/types.py +++ b/pubsub/google/cloud/pubsub_v1/types.py @@ -44,11 +44,12 @@ # these settings can be altered to tweak Pub/Sub behavior. # The defaults should be fine for most use cases. FlowControl = collections.namedtuple('FlowControl', - ['max_bytes', 'max_messages'], + ['max_bytes', 'max_messages', 'resume_threshold'], ) FlowControl.__new__.__defaults__ = ( psutil.virtual_memory().total * 0.2, # max_bytes: 20% of total RAM float('inf'), # max_messages: no limit + 0.8, # resume_threshold: 80% ) diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_message.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_message.py index 391a6db59240..a3a1e16f027e 100644 --- a/pubsub/tests/unit/pubsub_v1/subscriber/test_message.py +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_message.py @@ -57,9 +57,9 @@ def test_ack(): msg.ack() put.assert_called_once_with(('ack', { 'ack_id': 'bogus_ack_id', + 'byte_size': 25, 'time_to_ack': mock.ANY, })) - drop.assert_called_once_with() def test_drop(): @@ -93,9 +93,10 @@ def test_modify_ack_deadline(): def test_nack(): - msg = create_message(b'foo') - with mock.patch.object(message.Message, 'modify_ack_deadline') as mad: - with mock.patch.object(message.Message, 'drop') as drop: - msg.nack() - mad.assert_called_once_with(seconds=0) - drop.assert_called_once_with() + msg = create_message(b'foo', ack_id='bogus_id') + with mock.patch.object(msg._request_queue, 'put') as put: + 
msg.nack() + put.assert_called_once_with(('nack', { + 'ack_id': 'bogus_id', + 'byte_size': 25, + })) diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_base.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_base.py index 42f416cb50ba..b665e7898289 100644 --- a/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_base.py +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_base.py @@ -21,9 +21,9 @@ from google.cloud.pubsub_v1.subscriber.policy import thread -def create_policy(): +def create_policy(flow_control=types.FlowControl()): client = subscriber.Client() - return thread.Policy(client, 'sub_name') + return thread.Policy(client, 'sub_name', flow_control=flow_control) def test_ack_deadline(): @@ -35,9 +35,9 @@ def test_ack_deadline(): assert policy.ack_deadline == 20 -def test_initial_request(): +def test_get_initial_request(): policy = create_policy() - initial_request = policy.initial_request + initial_request = policy.get_initial_request() assert isinstance(initial_request, types.StreamingPullRequest) assert initial_request.subscription == 'sub_name' assert initial_request.stream_ack_deadline_seconds == 10 @@ -61,6 +61,7 @@ def test_subscription(): def test_ack(): policy = create_policy() + policy._consumer.active = True with mock.patch.object(policy._consumer, 'send_request') as send_request: policy.ack('ack_id_string', 20) send_request.assert_called_once_with(types.StreamingPullRequest( @@ -72,6 +73,7 @@ def test_ack(): def test_ack_no_time(): policy = create_policy() + policy._consumer.active = True with mock.patch.object(policy._consumer, 'send_request') as send_request: policy.ack('ack_id_string') send_request.assert_called_once_with(types.StreamingPullRequest( @@ -80,6 +82,14 @@ def test_ack_no_time(): assert len(policy.histogram) == 0 +def test_ack_paused(): + policy = create_policy() + policy._paused = True + policy._consumer.active = False + policy.ack('ack_id_string') + assert 'ack_id_string' in policy._ack_on_resume + + def 
test_call_rpc(): policy = create_policy() with mock.patch.object(policy._client.api, 'streaming_pull') as pull: @@ -101,6 +111,41 @@ def test_drop(): assert policy._bytes == 0 +def test_drop_below_threshold(): + """Establish that we resume a paused subscription. + + If the subscription is paused, and we drop sufficiently below + the flow control thresholds, it should resume. + """ + policy = create_policy() + policy.managed_ack_ids.add('ack_id_string') + policy._bytes = 20 + policy._paused = True + with mock.patch.object(policy, 'open') as open_: + policy.drop(ack_id='ack_id_string', byte_size=20) + open_.assert_called_once_with(policy._callback) + assert policy._paused is False + + +def test_load(): + flow_control = types.FlowControl(max_messages=10, max_bytes=1000) + policy = create_policy(flow_control=flow_control) + + # This should mean that our messages count is at 10%, and our bytes + # are at 15%; the ._load property should return the higher (0.15). + policy.lease(ack_id='one', byte_size=150) + assert policy._load == 0.15 + + # After this message is added, the messages should be higher at 20% + # (versus 16% for bytes). + policy.lease(ack_id='two', byte_size=10) + assert policy._load == 0.2 + + # Returning a number above 100% is fine. + policy.lease(ack_id='three', byte_size=1000) + assert policy._load == 1.16 + + def test_modify_ack_deadline(): policy = create_policy() with mock.patch.object(policy._consumer, 'send_request') as send_request: @@ -153,18 +198,30 @@ def trigger_inactive(seconds): def test_lease(): policy = create_policy() - policy.lease('ack_id_string', 20) + policy.lease(ack_id='ack_id_string', byte_size=20) assert len(policy.managed_ack_ids) == 1 assert policy._bytes == 20 # Do this again to prove idempotency. 
- policy.lease('ack_id_string', 20) + policy.lease(ack_id='ack_id_string', byte_size=20) assert len(policy.managed_ack_ids) == 1 assert policy._bytes == 20 +def test_lease_above_threshold(): + flow_control = types.FlowControl(max_messages=2) + policy = create_policy(flow_control=flow_control) + with mock.patch.object(policy, 'close') as close: + policy.lease(ack_id='first_ack_id', byte_size=20) + assert close.call_count == 0 + policy.lease(ack_id='second_ack_id', byte_size=25) + close.assert_called_once_with() + + def test_nack(): policy = create_policy() with mock.patch.object(policy, 'modify_ack_deadline') as mad: - policy.nack('ack_id_string') + with mock.patch.object(policy, 'drop') as drop: + policy.nack(ack_id='ack_id_string', byte_size=10) + drop.assert_called_once_with(ack_id='ack_id_string', byte_size=10) mad.assert_called_once_with(ack_id='ack_id_string', seconds=0) From 81b37f48d6b360dacf742fe93ed235bcecceab1e Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Thu, 3 Aug 2017 13:21:56 -0700 Subject: [PATCH 35/63] Subscription fixes. --- pubsub/google/cloud/pubsub_v1/subscriber/message.py | 3 ++- pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py | 4 +++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/message.py b/pubsub/google/cloud/pubsub_v1/subscriber/message.py index bb85823664c0..1e97c324b2e9 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/message.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/message.py @@ -74,7 +74,7 @@ def __init__(self, message, ack_id, request_queue): def __repr__(self): # Get an abbreviated version of the data. - abbv_data = self._data + abbv_data = self._message.data if len(abbv_data) > 50: abbv_data = abbv_data[0:50] + b'...' 
@@ -83,6 +83,7 @@ def __repr__(self): answer += ' data: {0!r}\n'.format(abbv_data) answer += ' attributes: {0!r}\n'.format(self.attributes) answer += '}' + return answer @property def attributes(self): diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py index 1f29f53c92f1..c03da7f81ddc 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py @@ -129,5 +129,7 @@ def on_response(self, response): """ for msg in response.received_messages: logger.debug('New message received from Pub/Sub: %r', msg) + logger.debug(self._callback) message = Message(msg.message, msg.ack_id, self._request_queue) - self._executor.submit(self._callback, message) + future = self._executor.submit(self._callback, message) + logger.debug('Result: %s' % future.result()) From 5784d4d1de3bbad0282fadcc2bd610686a6c1fdc Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Thu, 3 Aug 2017 14:03:59 -0700 Subject: [PATCH 36/63] Change batch time, add gRPC time logging. --- .../cloud/pubsub_v1/publisher/batch/thread.py | 15 ++++++++++++--- pubsub/google/cloud/pubsub_v1/types.py | 2 +- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py b/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py index 37664ef5ffec..ecb88a50c755 100644 --- a/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py +++ b/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py @@ -14,6 +14,7 @@ from __future__ import absolute_import +import logging import threading import time import uuid @@ -167,10 +168,18 @@ def _commit(self): # Begin the request to publish these messages. if len(self._messages) == 0: return + + # Make the actual GRPC request. + # Log how long the underlying request takes. 
+ start = time.time() response = self.client.api.publish( self._topic, self.messages, ) + end = time.time() + logging.getLogger().debug('gRPC Publish took {sec} seconds.'.format( + sec=end - start, + )) # We got a response from Pub/Sub; denote that we are processing. self._status = 'processing results' @@ -186,9 +195,9 @@ def _commit(self): # We are trusting that there is a 1:1 mapping, and raise an exception # if not. self._status = self.Status.SUCCESS - for message_id, fut in zip(response.message_ids, self._futures): - self.message_ids[hash(fut)] = message_id - fut._trigger() + for message_id, future in zip(response.message_ids, self._futures): + self.message_ids[hash(future)] = message_id + future._trigger() def monitor(self): """Commit this batch after sufficient time has elapsed. diff --git a/pubsub/google/cloud/pubsub_v1/types.py b/pubsub/google/cloud/pubsub_v1/types.py index ec92ab38524d..33d1353abe50 100644 --- a/pubsub/google/cloud/pubsub_v1/types.py +++ b/pubsub/google/cloud/pubsub_v1/types.py @@ -34,7 +34,7 @@ ) BatchSettings.__new__.__defaults__ = ( 1024 * 1024 * 5, # max_bytes: 5 MB - 1.0, # max_latency: 1.0 seconds + 0.05, # max_latency: 0.05 seconds 1000, # max_messages: 1,000 ) From 97d8431e9b4e3f089751c996c976fe890445f88f Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Thu, 3 Aug 2017 14:17:44 -0700 Subject: [PATCH 37/63] Unit test fix. 
--- .../unit/pubsub_v1/publisher/test_publisher_client.py | 2 +- .../tests/unit/pubsub_v1/subscriber/test_consumer.py | 4 ++-- .../unit/pubsub_v1/subscriber/test_policy_base.py | 10 ++++++---- .../unit/pubsub_v1/subscriber/test_policy_thread.py | 2 +- .../pubsub_v1/subscriber/test_subscriber_client.py | 4 ++-- 5 files changed, 12 insertions(+), 10 deletions(-) diff --git a/pubsub/tests/unit/pubsub_v1/publisher/test_publisher_client.py b/pubsub/tests/unit/pubsub_v1/publisher/test_publisher_client.py index 6ee66d636578..f10863b92d47 100644 --- a/pubsub/tests/unit/pubsub_v1/publisher/test_publisher_client.py +++ b/pubsub/tests/unit/pubsub_v1/publisher/test_publisher_client.py @@ -29,7 +29,7 @@ def test_init(): # batch settings object, which should have the defaults. assert isinstance(client.api, publisher_client.PublisherClient) assert client.batch_settings.max_bytes == 5 * (2 ** 20) - assert client.batch_settings.max_latency == 1.0 + assert client.batch_settings.max_latency == 0.05 assert client.batch_settings.max_messages == 1000 diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py index 3ab7b21e86c1..05cb646a0f02 100644 --- a/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py @@ -27,7 +27,7 @@ def create_consumer(): client = subscriber.Client() - subscription = client.subscribe('sub_name') + subscription = client.subscribe('sub_name_e') return consumer.Consumer(policy=subscription) @@ -46,7 +46,7 @@ def test_request_generator_thread(): # The first request that comes from the request generator thread # should always be the initial request. initial_request = next(generator) - assert initial_request.subscription == 'sub_name' + assert initial_request.subscription == 'sub_name_e' assert initial_request.stream_ack_deadline_seconds == 10 # Subsequent requests correspond to items placed in the request queue. 
diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_base.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_base.py index b665e7898289..ca124bdcea31 100644 --- a/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_base.py +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_base.py @@ -23,7 +23,7 @@ def create_policy(flow_control=types.FlowControl()): client = subscriber.Client() - return thread.Policy(client, 'sub_name', flow_control=flow_control) + return thread.Policy(client, 'sub_name_d', flow_control=flow_control) def test_ack_deadline(): @@ -39,7 +39,7 @@ def test_get_initial_request(): policy = create_policy() initial_request = policy.get_initial_request() assert isinstance(initial_request, types.StreamingPullRequest) - assert initial_request.subscription == 'sub_name' + assert initial_request.subscription == 'sub_name_d' assert initial_request.stream_ack_deadline_seconds == 10 @@ -56,7 +56,7 @@ def test_managed_ack_ids(): def test_subscription(): policy = create_policy() - assert policy.subscription == 'sub_name' + assert policy.subscription == 'sub_name_d' def test_ack(): @@ -86,7 +86,9 @@ def test_ack_paused(): policy = create_policy() policy._paused = True policy._consumer.active = False - policy.ack('ack_id_string') + with mock.patch.object(policy, 'open') as open_: + policy.ack('ack_id_string') + open_.assert_called() assert 'ack_id_string' in policy._ack_on_resume diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py index d87848a76d9d..e715ddc79c42 100644 --- a/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py @@ -29,7 +29,7 @@ def create_policy(): client = subscriber.Client() - return thread.Policy(client, 'sub_name') + return thread.Policy(client, 'sub_name_c') def test_init(): diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_subscriber_client.py 
b/pubsub/tests/unit/pubsub_v1/subscriber/test_subscriber_client.py index 77f8b016abb6..6489de321f11 100644 --- a/pubsub/tests/unit/pubsub_v1/subscriber/test_subscriber_client.py +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_subscriber_client.py @@ -25,7 +25,7 @@ def test_init(): def test_subscribe(): client = subscriber.Client() - subscription = client.subscribe('sub_name') + subscription = client.subscribe('sub_name_a') assert isinstance(subscription, thread.Policy) @@ -33,6 +33,6 @@ def test_subscribe_with_callback(): client = subscriber.Client() callback = mock.Mock() with mock.patch.object(thread.Policy, 'open') as open_: - subscription = client.subscribe('sub_name', callback) + subscription = client.subscribe('sub_name_b', callback) open_.assert_called_once_with(callback) assert isinstance(subscription, thread.Policy) From cb7dc05fb7abdde95d3c99c129d00354112c219b Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Fri, 4 Aug 2017 07:54:28 -0700 Subject: [PATCH 38/63] Minor RST fixes (thanks @jonparrott). --- pubsub/google/cloud/pubsub_v1/subscriber/histogram.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/histogram.py b/pubsub/google/cloud/pubsub_v1/subscriber/histogram.py index 0e3d74d68b25..09f047495896 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/histogram.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/histogram.py @@ -34,8 +34,8 @@ def __init__(self, data=None): """Instantiate the histogram. Args: - data (dict): The data strucure to be used to store the - underlying data. The default is an empty dictionary. + data (Mapping[str, int]): The data strucure to be used to store + the underlying data. The default is an empty dictionary. This can be set to a dictionary-like object if required (for example, if a special object is needed for concurrency reasons). @@ -129,8 +129,8 @@ def percentile(self, percent): """Return the value that is the Nth precentile in the histogram. 
Args: - percent (int|float): The precentile being sought. The default - consumer implementations use consistently use ``99``. + percent (Union[int, float]): The precentile being sought. The + default consumer implementations use consistently use ``99``. Returns: int: The value corresponding to the requested percentile. From 69944651d54b8cf5cc6aec5332409119d1ccc80a Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Fri, 4 Aug 2017 08:02:28 -0700 Subject: [PATCH 39/63] Remove the ignore in .flake8. --- pubsub/.flake8 | 6 ------ pubsub/nox.py | 8 +++++--- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/pubsub/.flake8 b/pubsub/.flake8 index 712bd8afe7f4..25168dc87605 100644 --- a/pubsub/.flake8 +++ b/pubsub/.flake8 @@ -4,9 +4,3 @@ exclude = .git, *.pyc, conf.py - -ignore = - # Allow "under-indented" continuation lines. - E124, - # Allow closing parentheses to column-match the opening call. - E128 diff --git a/pubsub/nox.py b/pubsub/nox.py index f1f66ec5c214..c860e0741fe6 100644 --- a/pubsub/nox.py +++ b/pubsub/nox.py @@ -38,9 +38,10 @@ def unit_tests(session, python_version): session.install('-e', '.') # Run py.test against the unit tests. - session.run('py.test', '--quiet', '--cov-append', '--cov-report=', + session.run( + 'py.test', '--quiet', '--cov-append', '--cov-report=', '--cov=google.cloud.pubsub', '--cov=google.cloud.pubsub_v1', - '--cov-config=.coveragerc', 'tests/unit' + '--cov-config=.coveragerc', 'tests/unit', ) @@ -86,7 +87,8 @@ def lint(session): '--library-filesets', 'google', '--test-filesets', 'tests', # Temporarily allow this to fail. 
- success_codes=range(0, 100)) + success_codes=range(0, 100), + ) @nox.session From eae7e1434e79cdc5e4c6fb131ab5273460725665 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Wed, 9 Aug 2017 10:29:50 -0700 Subject: [PATCH 40/63] Set gRPC limit to 20MB + 1 --- pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py | 6 ++++++ pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py | 9 ++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py b/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py index ecb88a50c755..232534739551 100644 --- a/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py +++ b/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py @@ -19,6 +19,8 @@ import time import uuid +from google import gax + from google.cloud.pubsub_v1 import types from google.cloud.pubsub_v1.publisher import exceptions from google.cloud.pubsub_v1.publisher.batch import base @@ -175,6 +177,10 @@ def _commit(self): response = self.client.api.publish( self._topic, self.messages, + options=gax.CallOptions(**{ + 'grpc.max_message_length': 20 * (1024 ** 2) + 1, + 'grpc.max_receive_message_length': 20 * (1024 ** 2) + 1, + }), ) end = time.time() logging.getLogger().debug('gRPC Publish took {sec} seconds.'.format( diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py index dd416886b5d1..bf52f4cc604f 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py @@ -21,6 +21,8 @@ import six +from google import gax + from google.cloud.pubsub_v1 import types from google.cloud.pubsub_v1.subscriber import consumer from google.cloud.pubsub_v1.subscriber import histogram @@ -174,7 +176,12 @@ def call_rpc(self, request_generator): and blocks if there are no outstanding requests (until such time as there are). 
""" - return self._client.api.streaming_pull(request_generator) + return self._client.api.streaming_pull( + request_generator, + options=gax.CallOptions(**{ + 'grpc.max_receive_message_length': 20 * (1024 ** 2) + 1, + }), + ) def drop(self, ack_id, byte_size): """Remove the given ack ID from lease management. From 6afcd2a3965541bd98b89d2f0655344113f9964e Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Mon, 14 Aug 2017 21:01:18 -0700 Subject: [PATCH 41/63] Suppress not-working grpc options. --- pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py b/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py index 232534739551..6d5ff11068a8 100644 --- a/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py +++ b/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py @@ -177,10 +177,10 @@ def _commit(self): response = self.client.api.publish( self._topic, self.messages, - options=gax.CallOptions(**{ - 'grpc.max_message_length': 20 * (1024 ** 2) + 1, - 'grpc.max_receive_message_length': 20 * (1024 ** 2) + 1, - }), + # options=gax.CallOptions(**{ + # 'grpc.max_message_length': 20 * (1024 ** 2) + 1, + # 'grpc.max_receive_message_length': 20 * (1024 ** 2) + 1, + # }), ) end = time.time() logging.getLogger().debug('gRPC Publish took {sec} seconds.'.format( From f196b5e3815f98e24a962ec3b0bd82a75538a1ef Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Mon, 21 Aug 2017 13:41:23 -0700 Subject: [PATCH 42/63] Fix some tests to match new futures. 
--- .../publisher/batch/test_thread_future.py | 100 +++++++----------- 1 file changed, 37 insertions(+), 63 deletions(-) diff --git a/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread_future.py b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread_future.py index ee4014ee3691..870c254c68c8 100644 --- a/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread_future.py +++ b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread_future.py @@ -18,89 +18,51 @@ import pytest -from google.cloud.pubsub_v1 import publisher -from google.cloud.pubsub_v1 import types from google.cloud.pubsub_v1.publisher import exceptions -from google.cloud.pubsub_v1.publisher.batch.thread import Batch from google.cloud.pubsub_v1.publisher.batch.thread import Future -def create_batch(status=None): - """Create a batch object, which does not commit. - - Args: - status (str): If provided, the batch's internal status will be set - to the provided status. - - Returns: - ~.pubsub_v1.publisher.batch.thread.Batch: The batch object - """ - client = publisher.Client() - batch_settings = types.BatchSettings() - batch = Batch(client, 'topic_name', batch_settings, autocommit=False) - if status: - batch._status = status - return batch - - -def create_future(batch=None): - """Create a Future object to test. - - Args: - ~.pubsub_v1.publisher.batch.thread.Batch: A batch object, such - as one returned from :meth:`create_batch`. If none is provided, - a batch will be automatically created. - - Returns: - ~.pubsub_v1.publisher.batch.thread.Future: The Future object (the - class being tested in this module). 
- """ - if batch is None: - batch = create_batch() - return Future(batch=batch) - - def test_cancel(): - assert create_future().cancel() is False + assert Future().cancel() is False def test_cancelled(): - assert create_future().cancelled() is False + assert Future().cancelled() is False def test_running(): - assert create_future().running() is True + assert Future().running() is True def test_done(): - batch = create_batch() - future = create_future(batch=batch) + future = Future() assert future.done() is False - batch._status = batch.Status.SUCCESS - assert future._batch.status == 'success' + future.set_result('12345') assert future.done() is True def test_exception_no_error(): - batch = create_batch(status='success') - future = create_future(batch=batch) + future = Future() + future.set_result('12345') assert future.exception() is None def test_exception_with_error(): - batch = create_batch(status='error') - batch.error = RuntimeError('Something really bad happened.') - future = create_future(batch=batch) + future = Future() + error = RuntimeError('Something really bad happened.') + future.set_exception(error) # Make sure that the exception that is returned is the batch's error. # Also check the type to ensure the batch's error did not somehow # change internally. 
- assert future.exception() is batch.error + assert future.exception() is error assert isinstance(future.exception(), RuntimeError) + with pytest.raises(RuntimeError): + future.result() def test_exception_timeout(): - future = create_future() + future = Future() with mock.patch.object(time, 'sleep') as sleep: with pytest.raises(exceptions.TimeoutError): future.exception(timeout=10) @@ -113,22 +75,20 @@ def test_exception_timeout(): def test_result_no_error(): - batch = create_batch(status='success') - future = create_future(batch=batch) - batch.message_ids[hash(future)] = '42' + future = Future() + future.set_result('42') assert future.result() == '42' def test_result_with_error(): - batch = create_batch(status='error') - batch.error = RuntimeError('Something really bad happened.') - future = create_future(batch=batch) + future = Future() + future.set_exception(RuntimeError('Something really bad happened.')) with pytest.raises(RuntimeError): future.result() def test_add_done_callback_pending_batch(): - future = create_future() + future = Future() callback = mock.Mock() future.add_done_callback(callback) assert len(future._callbacks) == 1 @@ -137,17 +97,31 @@ def test_add_done_callback_pending_batch(): def test_add_done_callback_completed_batch(): - batch = create_batch(status='success') - future = create_future(batch=batch) + future = Future() + future.set_result('12345') callback = mock.Mock(spec=()) future.add_done_callback(callback) callback.assert_called_once_with(future) def test_trigger(): - future = create_future() + future = Future() callback = mock.Mock(spec=()) future.add_done_callback(callback) assert callback.call_count == 0 - future._trigger() + future.set_result('12345') callback.assert_called_once_with(future) + + +def test_set_result_once_only(): + future = Future() + future.set_result('12345') + with pytest.raises(RuntimeError): + future.set_result('67890') + + +def test_set_exception_once_only(): + future = Future() + 
future.set_exception(ValueError('wah wah')) + with pytest.raises(RuntimeError): + future.set_exception(TypeError('other wah wah')) From ee144aa796a2cba3bd14be8ba75ed9bd96bf08a3 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Mon, 21 Aug 2017 13:51:35 -0700 Subject: [PATCH 43/63] Move the future tests to match the code. --- .../publisher/{batch/test_thread_future.py => test_futures.py} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename pubsub/tests/unit/pubsub_v1/publisher/{batch/test_thread_future.py => test_futures.py} (98%) diff --git a/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread_future.py b/pubsub/tests/unit/pubsub_v1/publisher/test_futures.py similarity index 98% rename from pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread_future.py rename to pubsub/tests/unit/pubsub_v1/publisher/test_futures.py index 870c254c68c8..bbb2dcbd5cfa 100644 --- a/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread_future.py +++ b/pubsub/tests/unit/pubsub_v1/publisher/test_futures.py @@ -19,7 +19,7 @@ import pytest from google.cloud.pubsub_v1.publisher import exceptions -from google.cloud.pubsub_v1.publisher.batch.thread import Future +from google.cloud.pubsub_v1.publisher.futures import Future def test_cancel(): From 8cb8f98d10c309634b042354e412bd88e51444de Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Mon, 21 Aug 2017 13:56:10 -0700 Subject: [PATCH 44/63] Fix a publish failure test. 
--- .../unit/pubsub_v1/publisher/batch/test_thread.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread.py b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread.py index bbbc9890a8b1..3daad6eec96b 100644 --- a/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread.py +++ b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread.py @@ -121,14 +121,18 @@ def test_blocking_commit_no_messages(): def test_blocking_commit_wrong_messageid_length(): batch = create_batch() - batch.publish({'data': b'blah blah blah'}) - batch.publish({'data': b'blah blah blah blah'}) + futures = ( + batch.publish({'data': b'blah blah blah'}), + batch.publish({'data': b'blah blah blah blah'}), + ) # Set up a PublishResponse that only returns one message ID. with mock.patch.object(type(batch.client.api), 'publish') as publish: publish.return_value = types.PublishResponse(message_ids=['a']) - with pytest.raises(exceptions.PublishError): - batch._commit() + batch._commit() + for future in futures: + assert future.done() + assert isinstance(future.exception(), exceptions.PublishError) def test_monitor(): From 47678c3bc0aac674ddbd2d64d09bb12ae3429120 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Mon, 21 Aug 2017 13:58:01 -0700 Subject: [PATCH 45/63] Fix final test. --- pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py | 7 +------ pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py | 3 ++- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py index bf52f4cc604f..0a6716af07ae 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py @@ -176,12 +176,7 @@ def call_rpc(self, request_generator): and blocks if there are no outstanding requests (until such time as there are). 
""" - return self._client.api.streaming_pull( - request_generator, - options=gax.CallOptions(**{ - 'grpc.max_receive_message_length': 20 * (1024 ** 2) + 1, - }), - ) + return self._client.api.streaming_pull(request_generator) def drop(self, ack_id, byte_size): """Remove the given ack ID from lease management. diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py index c03da7f81ddc..a4819b900639 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py @@ -88,7 +88,8 @@ def open(self, callback): # Start the thread to pass the requests. logger.debug('Starting callback requests worker.') self._callback = callback - self._consumer.helper_threads.start('callback requests worker', + self._consumer.helper_threads.start( + 'callback requests worker', self._request_queue, self._callback_requests, ) From 90ef40f3878d2c1bcdc62f10ea9d04ae2de60773 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Mon, 21 Aug 2017 14:43:02 -0700 Subject: [PATCH 46/63] Sane max_workers default for 2.7 and 3.4 --- pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py index a4819b900639..403b4938fc99 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py @@ -63,7 +63,7 @@ def __init__(self, client, subscription, flow_control=types.FlowControl()): # Also maintain a request queue and an executor. 
logger.debug('Creating callback requests thread (not starting).') - self._executor = futures.ThreadPoolExecutor() + self._executor = futures.ThreadPoolExecutor(max_workers=10) self._callback_requests = helper_threads.QueueCallbackThread( self._request_queue, self.on_callback_request, From 34c8273ae2637f7108bf8f2ff5612bcde0130cc0 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Mon, 21 Aug 2017 14:53:51 -0700 Subject: [PATCH 47/63] Mock credentials appropriately. --- .../pubsub_v1/publisher/batch/test_base.py | 6 ++++- .../pubsub_v1/publisher/batch/test_thread.py | 12 ++++++--- .../publisher/test_publisher_client.py | 25 +++++++++++-------- .../pubsub_v1/subscriber/test_consumer.py | 4 ++- .../pubsub_v1/subscriber/test_policy_base.py | 4 ++- .../subscriber/test_policy_thread.py | 4 ++- .../subscriber/test_subscriber_client.py | 12 ++++++--- 7 files changed, 47 insertions(+), 20 deletions(-) diff --git a/pubsub/tests/unit/pubsub_v1/publisher/batch/test_base.py b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_base.py index 5210d2e62b58..289238b694b5 100644 --- a/pubsub/tests/unit/pubsub_v1/publisher/batch/test_base.py +++ b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_base.py @@ -14,6 +14,9 @@ from __future__ import absolute_import +import mock + +from google.auth import credentials from google.cloud.pubsub_v1 import publisher from google.cloud.pubsub_v1 import types from google.cloud.pubsub_v1.publisher.batch.thread import Batch @@ -29,7 +32,8 @@ def create_batch(status=None, settings=types.BatchSettings()): Returns: ~.pubsub_v1.publisher.batch.thread.Batch: The batch object """ - client = publisher.Client() + creds = mock.Mock(spec=credentials.Credentials) + client = publisher.Client(credentials=creds) batch = Batch(client, 'topic_name', settings, autocommit=False) if status: batch._status = status diff --git a/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread.py b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread.py index 
3daad6eec96b..1e3131964451 100644 --- a/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread.py +++ b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread.py @@ -19,12 +19,18 @@ import pytest +from google.auth import credentials from google.cloud.pubsub_v1 import publisher from google.cloud.pubsub_v1 import types from google.cloud.pubsub_v1.publisher import exceptions from google.cloud.pubsub_v1.publisher.batch.thread import Batch +def create_client(): + creds = mock.Mock(spec=credentials.Credentials) + return publisher.Client(credentials=creds) + + def create_batch(autocommit=False, **batch_settings): """Return a batch object suitable for testing. @@ -38,14 +44,14 @@ def create_batch(autocommit=False, **batch_settings): Returns: ~.pubsub_v1.publisher.batch.thread.Batch: A batch object. """ - client = publisher.Client() + client = create_client() settings = types.BatchSettings(**batch_settings) return Batch(client, 'topic_name', settings, autocommit=autocommit) def test_init(): """Establish that a monitor thread is usually created on init.""" - client = publisher.Client() + client = create_client() # Do not actually create a thread, but do verify that one was created; # it should be running the batch's "monitor" method (which commits the @@ -64,7 +70,7 @@ def test_init_infinite_latency(): def test_client(): - client = publisher.Client() + client = create_client() settings = types.BatchSettings() batch = Batch(client, 'topic_name', settings, autocommit=False) assert batch.client is client diff --git a/pubsub/tests/unit/pubsub_v1/publisher/test_publisher_client.py b/pubsub/tests/unit/pubsub_v1/publisher/test_publisher_client.py index f10863b92d47..0054b25262b5 100644 --- a/pubsub/tests/unit/pubsub_v1/publisher/test_publisher_client.py +++ b/pubsub/tests/unit/pubsub_v1/publisher/test_publisher_client.py @@ -16,14 +16,19 @@ import pytest +from google.auth import credentials from google.cloud.gapic.pubsub.v1 import publisher_client - from google.cloud.pubsub_v1 
import publisher from google.cloud.pubsub_v1 import types +def create_client(): + creds = mock.Mock(spec=credentials.Credentials) + return publisher.Client(credentials=creds) + + def test_init(): - client = publisher.Client() + client = create_client() # A plain client should have an `api` (the underlying GAPIC) and a # batch settings object, which should have the defaults. @@ -35,7 +40,7 @@ def test_init(): def test_batch_accepting(): """Establish that an existing batch is returned if it accepts messages.""" - client = publisher.Client() + client = create_client() message = types.PubsubMessage(data=b'foo') # At first, there are no batches, so this should return a new batch @@ -52,7 +57,7 @@ def test_batch_accepting(): def test_batch_without_autocreate(): - client = publisher.Client() + client = create_client() message = types.PubsubMessage(data=b'foo') # If `create=False` is sent, then when the batch is not found, None @@ -64,7 +69,7 @@ def test_batch_without_autocreate(): def test_publish(): - client = publisher.Client() + client = create_client() # Use a mock in lieu of the actual batch class; set the mock up to claim # indiscriminately that it accepts all messages. @@ -92,7 +97,7 @@ def test_publish(): def test_publish_data_not_bytestring_error(): - client = publisher.Client() + client = create_client() with pytest.raises(TypeError): client.publish('topic_name', u'This is a text string.') with pytest.raises(TypeError): @@ -100,7 +105,7 @@ def test_publish_data_not_bytestring_error(): def test_publish_attrs_bytestring(): - client = publisher.Client() + client = create_client() # Use a mock in lieu of the actual batch class; set the mock up to claim # indiscriminately that it accepts all messages. 
@@ -118,13 +123,13 @@ def test_publish_attrs_bytestring(): def test_publish_attrs_type_error(): - client = publisher.Client() + client = create_client() with pytest.raises(TypeError): client.publish('topic_name', b'foo', answer=42) def test_gapic_instance_method(): - client = publisher.Client() + client = create_client() with mock.patch.object(client.api, '_create_topic', autospec=True) as ct: client.create_topic('projects/foo/topics/bar') assert ct.call_count == 1 @@ -133,6 +138,6 @@ def test_gapic_instance_method(): def test_gapic_class_method(): - client = publisher.Client() + client = create_client() answer = client.topic_path('foo', 'bar') assert answer == 'projects/foo/topics/bar' diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py index 05cb646a0f02..a6d76588bc22 100644 --- a/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py @@ -18,6 +18,7 @@ import pytest +from google.auth import credentials from google.cloud.pubsub_v1 import subscriber from google.cloud.pubsub_v1 import types from google.cloud.pubsub_v1.subscriber import consumer @@ -26,7 +27,8 @@ def create_consumer(): - client = subscriber.Client() + creds = mock.Mock(spec=credentials.Credentials) + client = subscriber.Client(credentials=creds) subscription = client.subscribe('sub_name_e') return consumer.Consumer(policy=subscription) diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_base.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_base.py index ca124bdcea31..df963424ccb9 100644 --- a/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_base.py +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_base.py @@ -16,13 +16,15 @@ import mock +from google.auth import credentials from google.cloud.pubsub_v1 import subscriber from google.cloud.pubsub_v1 import types from google.cloud.pubsub_v1.subscriber.policy import thread def 
create_policy(flow_control=types.FlowControl()): - client = subscriber.Client() + creds = mock.Mock(spec=credentials.Credentials) + client = subscriber.Client(credentials=creds) return thread.Policy(client, 'sub_name_d', flow_control=flow_control) diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py index e715ddc79c42..232c8ee01e8b 100644 --- a/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py @@ -20,6 +20,7 @@ import pytest +from google.auth import credentials from google.cloud.pubsub_v1 import subscriber from google.cloud.pubsub_v1 import types from google.cloud.pubsub_v1.subscriber import helper_threads @@ -28,7 +29,8 @@ def create_policy(): - client = subscriber.Client() + creds = mock.Mock(spec=credentials.Credentials) + client = subscriber.Client(credentials=creds) return thread.Policy(client, 'sub_name_c') diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_subscriber_client.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_subscriber_client.py index 6489de321f11..50e90fead181 100644 --- a/pubsub/tests/unit/pubsub_v1/subscriber/test_subscriber_client.py +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_subscriber_client.py @@ -14,23 +14,29 @@ import mock +from google.auth import credentials from google.cloud.pubsub_v1 import subscriber from google.cloud.pubsub_v1.subscriber.policy import thread +def create_client(): + creds = mock.Mock(spec=credentials.Credentials) + return subscriber.Client(credentials=creds) + + def test_init(): - client = subscriber.Client() + client = create_client() assert client._policy_class is thread.Policy def test_subscribe(): - client = subscriber.Client() + client = create_client() subscription = client.subscribe('sub_name_a') assert isinstance(subscription, thread.Policy) def test_subscribe_with_callback(): - client = subscriber.Client() + client = create_client() 
callback = mock.Mock() with mock.patch.object(thread.Policy, 'open') as open_: subscription = client.subscribe('sub_name_b', callback) From 831fe75f6551907c9518980f9d505b0bc5f3a4da Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Mon, 21 Aug 2017 20:06:09 -0700 Subject: [PATCH 48/63] Remove fail_under from .coveragerc. It is still in nox.py in the cover session. --- pubsub/.coveragerc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pubsub/.coveragerc b/pubsub/.coveragerc index 588fc38a3c2d..41ca7428e2ee 100644 --- a/pubsub/.coveragerc +++ b/pubsub/.coveragerc @@ -6,8 +6,8 @@ source = tests.unit [report] -fail_under = 100 show_missing = True + exclude_lines = # Re-enable the standard pragma pragma: NO COVER From 02fa81f463d3e97d280b04e931d9322073326bf6 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Mon, 21 Aug 2017 20:47:58 -0700 Subject: [PATCH 49/63] Make histogram and helper_threads private. --- .../{helper_threads.py => _helper_threads.py} | 0 .../{histogram.py => _histogram.py} | 0 .../cloud/pubsub_v1/subscriber/consumer.py | 6 ++--- .../cloud/pubsub_v1/subscriber/policy/base.py | 6 ++--- .../pubsub_v1/subscriber/policy/thread.py | 4 +-- .../pubsub_v1/subscriber/test_consumer.py | 4 +-- .../subscriber/test_helper_threads.py | 26 +++++++++---------- .../pubsub_v1/subscriber/test_histogram.py | 18 ++++++------- .../subscriber/test_policy_thread.py | 4 +-- 9 files changed, 33 insertions(+), 35 deletions(-) rename pubsub/google/cloud/pubsub_v1/subscriber/{helper_threads.py => _helper_threads.py} (100%) rename pubsub/google/cloud/pubsub_v1/subscriber/{histogram.py => _histogram.py} (100%) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/helper_threads.py b/pubsub/google/cloud/pubsub_v1/subscriber/_helper_threads.py similarity index 100% rename from pubsub/google/cloud/pubsub_v1/subscriber/helper_threads.py rename to pubsub/google/cloud/pubsub_v1/subscriber/_helper_threads.py diff --git 
a/pubsub/google/cloud/pubsub_v1/subscriber/histogram.py b/pubsub/google/cloud/pubsub_v1/subscriber/_histogram.py similarity index 100% rename from pubsub/google/cloud/pubsub_v1/subscriber/histogram.py rename to pubsub/google/cloud/pubsub_v1/subscriber/_histogram.py diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py b/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py index 68a9bd386201..aff41a955db9 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py @@ -120,7 +120,7 @@ import queue import threading -from google.cloud.pubsub_v1.subscriber import helper_threads +from google.cloud.pubsub_v1.subscriber import _helper_threads _LOGGER = logging.getLogger(__name__) @@ -179,7 +179,7 @@ def __init__(self, policy): self._exiting = threading.Event() self.active = False - self.helper_threads = helper_threads.HelperThreadRegistry() + self.helper_threads = _helper_threads.HelperThreadRegistry() """:cls:`_helper_threads.HelperThreads`: manages the helper threads. The policy may use this to schedule its own helper threads. """ @@ -210,7 +210,7 @@ def _request_generator_thread(self): # are none. This can and must block to keep the stream open. 
while True: request = self._request_queue.get() - if request == helper_threads.STOP: + if request == _helper_threads.STOP: _LOGGER.debug('Request generator signaled to stop.') break diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py index 0a6716af07ae..ecfaab33d3f4 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py @@ -21,11 +21,9 @@ import six -from google import gax - from google.cloud.pubsub_v1 import types from google.cloud.pubsub_v1.subscriber import consumer -from google.cloud.pubsub_v1.subscriber import histogram +from google.cloud.pubsub_v1.subscriber import _histogram logger = logging.getLogger(__name__) @@ -72,7 +70,7 @@ def __init__(self, client, subscription, self._ack_deadline = 10 self._last_histogram_size = 0 self.flow_control = flow_control - self.histogram = histogram.Histogram(data=histogram_data) + self.histogram = _histogram.Histogram(data=histogram_data) # These are for internal flow control tracking. # They should not need to be used by subclasses. diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py index 403b4938fc99..359d3880f67e 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py @@ -22,7 +22,7 @@ import grpc from google.cloud.pubsub_v1 import types -from google.cloud.pubsub_v1.subscriber import helper_threads +from google.cloud.pubsub_v1.subscriber import _helper_threads from google.cloud.pubsub_v1.subscriber.policy import base from google.cloud.pubsub_v1.subscriber.message import Message @@ -64,7 +64,7 @@ def __init__(self, client, subscription, flow_control=types.FlowControl()): # Also maintain a request queue and an executor. 
logger.debug('Creating callback requests thread (not starting).') self._executor = futures.ThreadPoolExecutor(max_workers=10) - self._callback_requests = helper_threads.QueueCallbackThread( + self._callback_requests = _helper_threads.QueueCallbackThread( self._request_queue, self.on_callback_request, ) diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py index a6d76588bc22..a4765875ef9f 100644 --- a/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py @@ -22,7 +22,7 @@ from google.cloud.pubsub_v1 import subscriber from google.cloud.pubsub_v1 import types from google.cloud.pubsub_v1.subscriber import consumer -from google.cloud.pubsub_v1.subscriber import helper_threads +from google.cloud.pubsub_v1.subscriber import _helper_threads from google.cloud.pubsub_v1.subscriber.policy import thread @@ -57,7 +57,7 @@ def test_request_generator_thread(): assert request.ack_ids == ['i'] # The poison pill should stop the loop. 
- consumer.send_request(helper_threads.STOP) + consumer.send_request(_helper_threads.STOP) with pytest.raises(StopIteration): next(generator) diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_helper_threads.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_helper_threads.py index f1234eefc4b2..0ac36bcdf583 100644 --- a/pubsub/tests/unit/pubsub_v1/subscriber/test_helper_threads.py +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_helper_threads.py @@ -17,11 +17,11 @@ import mock -from google.cloud.pubsub_v1.subscriber import helper_threads +from google.cloud.pubsub_v1.subscriber import _helper_threads def test_start(): - registry = helper_threads.HelperThreadRegistry() + registry = _helper_threads.HelperThreadRegistry() queue_ = queue.Queue() target = mock.Mock(spec=()) with mock.patch.object(threading.Thread, 'start', autospec=True) as start: @@ -30,15 +30,15 @@ def test_start(): def test_stop_noop(): - registry = helper_threads.HelperThreadRegistry() + registry = _helper_threads.HelperThreadRegistry() assert len(registry._helper_threads) == 0 registry.stop('foo') assert len(registry._helper_threads) == 0 def test_stop_dead_thread(): - registry = helper_threads.HelperThreadRegistry() - registry._helper_threads['foo'] = helper_threads._HelperThread( + registry = _helper_threads.HelperThreadRegistry() + registry._helper_threads['foo'] = _helper_threads._HelperThread( name='foo', queue=None, thread=threading.Thread(target=lambda: None), @@ -55,8 +55,8 @@ def test_stop_alive_thread(join, is_alive, put): is_alive.return_value = True # Set up a registry with a helper thread in it. 
- registry = helper_threads.HelperThreadRegistry() - registry._helper_threads['foo'] = helper_threads._HelperThread( + registry = _helper_threads.HelperThreadRegistry() + registry._helper_threads['foo'] = _helper_threads._HelperThread( name='foo', queue=queue.Queue(), thread=threading.Thread(target=lambda: None), @@ -71,12 +71,12 @@ def test_stop_alive_thread(join, is_alive, put): # Assert that all of our mocks were called in the expected manner. is_alive.assert_called_once_with() join.assert_called_once_with() - put.assert_called_once_with(helper_threads.STOP) + put.assert_called_once_with(_helper_threads.STOP) def test_stop_all(): - registry = helper_threads.HelperThreadRegistry() - registry._helper_threads['foo'] = helper_threads._HelperThread( + registry = _helper_threads.HelperThreadRegistry() + registry._helper_threads['foo'] = _helper_threads._HelperThread( name='foo', queue=None, thread=threading.Thread(target=lambda: None), @@ -87,7 +87,7 @@ def test_stop_all(): def test_stop_all_noop(): - registry = helper_threads.HelperThreadRegistry() + registry = _helper_threads.HelperThreadRegistry() assert len(registry._helper_threads) == 0 registry.stop_all() assert len(registry._helper_threads) == 0 @@ -96,12 +96,12 @@ def test_stop_all_noop(): def test_queue_callback_thread(): queue_ = queue.Queue() callback = mock.Mock(spec=()) - qct = helper_threads.QueueCallbackThread(queue_, callback) + qct = _helper_threads.QueueCallbackThread(queue_, callback) # Set up an appropriate mock for the queue, and call the queue callback # thread. with mock.patch.object(queue.Queue, 'get') as get: - get.side_effect = (mock.sentinel.A, helper_threads.STOP) + get.side_effect = (mock.sentinel.A, _helper_threads.STOP) qct() # Assert that we got the expected calls. 
diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_histogram.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_histogram.py index d3e5e02a92c0..23474a19d116 100644 --- a/pubsub/tests/unit/pubsub_v1/subscriber/test_histogram.py +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_histogram.py @@ -12,18 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. -from google.cloud.pubsub_v1.subscriber import histogram +from google.cloud.pubsub_v1.subscriber import _histogram def test_init(): data = {} - histo = histogram.Histogram(data=data) + histo = _histogram.Histogram(data=data) assert histo._data is data assert len(histo) == 0 def test_contains(): - histo = histogram.Histogram() + histo = _histogram.Histogram() histo.add(10) histo.add(20) assert 10 in histo @@ -32,7 +32,7 @@ def test_contains(): def test_max(): - histo = histogram.Histogram() + histo = _histogram.Histogram() assert histo.max == 600 histo.add(120) assert histo.max == 120 @@ -43,7 +43,7 @@ def test_max(): def test_min(): - histo = histogram.Histogram() + histo = _histogram.Histogram() assert histo.min == 10 histo.add(60) assert histo.min == 60 @@ -54,7 +54,7 @@ def test_min(): def test_add(): - histo = histogram.Histogram() + histo = _histogram.Histogram() histo.add(60) assert histo._data[60] == 1 histo.add(60) @@ -62,21 +62,21 @@ def test_add(): def test_add_lower_limit(): - histo = histogram.Histogram() + histo = _histogram.Histogram() histo.add(5) assert 5 not in histo assert 10 in histo def test_add_upper_limit(): - histo = histogram.Histogram() + histo = _histogram.Histogram() histo.add(12000) assert 12000 not in histo assert 600 in histo def test_percentile(): - histo = histogram.Histogram() + histo = _histogram.Histogram() [histo.add(i) for i in range(101, 201)] assert histo.percentile(100) == 200 assert histo.percentile(101) == 200 diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py 
b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py index 232c8ee01e8b..397f47264788 100644 --- a/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py @@ -23,7 +23,7 @@ from google.auth import credentials from google.cloud.pubsub_v1 import subscriber from google.cloud.pubsub_v1 import types -from google.cloud.pubsub_v1.subscriber import helper_threads +from google.cloud.pubsub_v1.subscriber import _helper_threads from google.cloud.pubsub_v1.subscriber import message from google.cloud.pubsub_v1.subscriber.policy import thread @@ -48,7 +48,7 @@ def test_close(): assert 'callback request worker' not in policy._consumer.helper_threads -@mock.patch.object(helper_threads.HelperThreadRegistry, 'start') +@mock.patch.object(_helper_threads.HelperThreadRegistry, 'start') @mock.patch.object(threading.Thread, 'start') def test_open(thread_start, htr_start): policy = create_policy() From 2458b55d68ff1a0c78e61d28124ef1d99c931149 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Tue, 22 Aug 2017 08:08:14 -0700 Subject: [PATCH 50/63] Add a publishing system test. --- pubsub/tests/system.py | 58 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 pubsub/tests/system.py diff --git a/pubsub/tests/system.py b/pubsub/tests/system.py new file mode 100644 index 000000000000..b879a382accc --- /dev/null +++ b/pubsub/tests/system.py @@ -0,0 +1,58 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import + +import uuid + +import six + +from google import auth +from google.cloud import pubsub_v1 + + +def _resource_name(resource_type): + """Return a randomly selected name for a resource. + + Args: + resource_type (str): The resource for which a name is being + generated. Should be singular (e.g. "topic", "subscription") + """ + return 'projects/{project}/{resource_type}s/st-{random}'.format( + project=auth.default()[1], + random=str(uuid.uuid4())[0:8], + resource_type=resource_type, + ) + + +def test_publish_messages(): + publisher = pubsub_v1.PublisherClient() + topic_name = _resource_name('topic') + futures = [] + + try: + publisher.create_topic(topic_name) + for i in range(0, 500): + futures.append( + publisher.publish( + topic_name, + b'The hail in Wales falls mainly on the snails.', + num=str(i), + ), + ) + for future in futures: + result = future.result() + assert isinstance(result, (six.text_type, six.binary_type)) + finally: + publisher.delete_topic(topic_name) From 17b6544fd229aa81db9ed8508225775efe21ff5f Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Tue, 22 Aug 2017 10:17:08 -0700 Subject: [PATCH 51/63] Subscription system test. --- pubsub/tests/system.py | 50 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 49 insertions(+), 1 deletion(-) diff --git a/pubsub/tests/system.py b/pubsub/tests/system.py index b879a382accc..046b288bd9f3 100644 --- a/pubsub/tests/system.py +++ b/pubsub/tests/system.py @@ -14,8 +14,10 @@ from __future__ import absolute_import +import time import uuid +import mock import six from google import auth @@ -29,7 +31,7 @@ def _resource_name(resource_type): resource_type (str): The resource for which a name is being generated. Should be singular (e.g. 
"topic", "subscription") """ - return 'projects/{project}/{resource_type}s/st-{random}'.format( + return 'projects/{project}/{resource_type}s/st-n{random}'.format( project=auth.default()[1], random=str(uuid.uuid4())[0:8], resource_type=resource_type, @@ -56,3 +58,49 @@ def test_publish_messages(): assert isinstance(result, (six.text_type, six.binary_type)) finally: publisher.delete_topic(topic_name) + + +def test_subscribe_to_messages(): + publisher = pubsub_v1.PublisherClient() + subscriber = pubsub_v1.SubscriberClient() + topic_name = _resource_name('topic') + sub_name = _resource_name('subscription') + + try: + # Create a topic. + publisher.create_topic(topic_name) + + # Subscribe to the topic. This must happen before the messages + # are published. + subscriber.create_subscription(sub_name, topic_name) + subscription = subscriber.subscribe(sub_name) + + # Publish some messages. + futures = [publisher.publish( + topic_name, + b'Wooooo! The claaaaaw!', + num=str(i), + ) for i in range(0, 50)] + + # Make sure the publish completes. + [f.result() for f in futures] + + # The callback should process the message numbers to prove + # that we got everything at least once. + callback = mock.Mock(wraps=lambda message: message.ack()) + + # Actually open the subscription and hold it open for a few seconds. + subscription.open(callback) + for second in range(0,10): + time.sleep(1) + + # The callback should have fired at least fifty times, but it + # may take some time. + if callback.call_count >= 50: + return + + # Okay, we took too long; fail out. + assert callback.call_count >= 50 + finally: + publisher.delete_topic(topic_name) + subscriber.delete_subscription(sub_name) From a24c0a76adb072470584f2bbbbac2169423fd754 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Tue, 22 Aug 2017 13:27:57 -0700 Subject: [PATCH 52/63] Update tests. 
--- .../unit/pubsub_v1/publisher/batch/test_thread.py | 12 +++++------- .../tests/unit/pubsub_v1/publisher/test_futures.py | 10 +++------- 2 files changed, 8 insertions(+), 14 deletions(-) diff --git a/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread.py b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread.py index 1e3131964451..1e11f86b6c0b 100644 --- a/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread.py +++ b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread.py @@ -159,15 +159,13 @@ def test_monitor_already_committed(): batch = create_batch(max_latency=5.0) batch._status = 'something else' with mock.patch.object(time, 'sleep') as sleep: - with mock.patch.object(type(batch), '_commit') as _commit: - batch.monitor() + batch.monitor() - # The monitor should have waited the given latency. - sleep.assert_called_once_with(5.0) + # The monitor should have waited the given latency. + sleep.assert_called_once_with(5.0) - # Since the batch was no longer accepting messages, the - # commit function should *not* have been called. - assert _commit.call_count == 0 + # The status should not have changed. + assert batch._status == 'something else' def test_publish(): diff --git a/pubsub/tests/unit/pubsub_v1/publisher/test_futures.py b/pubsub/tests/unit/pubsub_v1/publisher/test_futures.py index bbb2dcbd5cfa..e018481b28e1 100644 --- a/pubsub/tests/unit/pubsub_v1/publisher/test_futures.py +++ b/pubsub/tests/unit/pubsub_v1/publisher/test_futures.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import threading import time import mock @@ -63,16 +64,11 @@ def test_exception_with_error(): def test_exception_timeout(): future = Future() - with mock.patch.object(time, 'sleep') as sleep: + with mock.patch.object(threading.Event, 'wait') as wait: + wait.return_value = False with pytest.raises(exceptions.TimeoutError): future.exception(timeout=10) - # The sleep should have been called with 1, 2, 4, then 3 seconds - # (the first three due to linear backoff, then the last one because - # only three seconds were left before the timeout was to be hit). - assert sleep.call_count == 4 - assert sleep.mock_calls[0] - def test_result_no_error(): future = Future() From a054324ab669e029e4e363ed0e4da39cf36c78a2 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Tue, 22 Aug 2017 13:34:12 -0700 Subject: [PATCH 53/63] Make the wait test work on 2.7 --- pubsub/tests/unit/pubsub_v1/publisher/test_futures.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pubsub/tests/unit/pubsub_v1/publisher/test_futures.py b/pubsub/tests/unit/pubsub_v1/publisher/test_futures.py index e018481b28e1..ad91f2a3f0ef 100644 --- a/pubsub/tests/unit/pubsub_v1/publisher/test_futures.py +++ b/pubsub/tests/unit/pubsub_v1/publisher/test_futures.py @@ -64,10 +64,8 @@ def test_exception_with_error(): def test_exception_timeout(): future = Future() - with mock.patch.object(threading.Event, 'wait') as wait: - wait.return_value = False - with pytest.raises(exceptions.TimeoutError): - future.exception(timeout=10) + with pytest.raises(exceptions.TimeoutError): + future.exception(timeout=0.01) def test_result_no_error(): From 3735da5e11ad108497ffd9161e878c72f065d8ae Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Tue, 22 Aug 2017 13:36:48 -0700 Subject: [PATCH 54/63] Discarding unused mocks. 
--- pubsub/tests/unit/pubsub_v1/publisher/test_futures.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/pubsub/tests/unit/pubsub_v1/publisher/test_futures.py b/pubsub/tests/unit/pubsub_v1/publisher/test_futures.py index ad91f2a3f0ef..e9b64a202e94 100644 --- a/pubsub/tests/unit/pubsub_v1/publisher/test_futures.py +++ b/pubsub/tests/unit/pubsub_v1/publisher/test_futures.py @@ -12,9 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import threading -import time - import mock import pytest From ac9f182bdc24b6f92836dafd76c051cd533802c5 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Tue, 22 Aug 2017 13:59:47 -0700 Subject: [PATCH 55/63] Make _consumer a private module. --- .../cloud/pubsub_v1/subscriber/{consumer.py => _consumer.py} | 3 ++- pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py | 4 ++-- pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py | 4 ++-- 3 files changed, 6 insertions(+), 5 deletions(-) rename pubsub/google/cloud/pubsub_v1/subscriber/{consumer.py => _consumer.py} (99%) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py b/pubsub/google/cloud/pubsub_v1/subscriber/_consumer.py similarity index 99% rename from pubsub/google/cloud/pubsub_v1/subscriber/consumer.py rename to pubsub/google/cloud/pubsub_v1/subscriber/_consumer.py index aff41a955db9..9fb2567176bc 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/consumer.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/_consumer.py @@ -254,7 +254,8 @@ def start_consuming(self): """Start consuming the stream.""" self.active = True self._exiting.clear() - self.helper_threads.start('consume bidirectional stream', + self.helper_threads.start( + 'consume bidirectional stream', self._request_queue, self._blocking_consume, ) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py index ecfaab33d3f4..32035124145e 100644 --- 
a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py @@ -22,7 +22,7 @@ import six from google.cloud.pubsub_v1 import types -from google.cloud.pubsub_v1.subscriber import consumer +from google.cloud.pubsub_v1.subscriber import _consumer from google.cloud.pubsub_v1.subscriber import _histogram logger = logging.getLogger(__name__) @@ -66,7 +66,7 @@ def __init__(self, client, subscription, """ self._client = client self._subscription = subscription - self._consumer = consumer.Consumer(self) + self._consumer = _consumer.Consumer(self) self._ack_deadline = 10 self._last_histogram_size = 0 self.flow_control = flow_control diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py index a4765875ef9f..2a3429fbc5b3 100644 --- a/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py @@ -21,7 +21,7 @@ from google.auth import credentials from google.cloud.pubsub_v1 import subscriber from google.cloud.pubsub_v1 import types -from google.cloud.pubsub_v1.subscriber import consumer +from google.cloud.pubsub_v1.subscriber import _consumer from google.cloud.pubsub_v1.subscriber import _helper_threads from google.cloud.pubsub_v1.subscriber.policy import thread @@ -30,7 +30,7 @@ def create_consumer(): creds = mock.Mock(spec=credentials.Credentials) client = subscriber.Client(credentials=creds) subscription = client.subscribe('sub_name_e') - return consumer.Consumer(policy=subscription) + return _consumer.Consumer(policy=subscription) def test_send_request(): From f272eca2b93dfb5a8970296a1452f3742238850d Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Tue, 22 Aug 2017 14:06:58 -0700 Subject: [PATCH 56/63] Switch from recursion to while for maintain_leases. 
--- .../cloud/pubsub_v1/subscriber/policy/base.py | 64 +++++++++---------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py index 32035124145e..85d047eb9439 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py @@ -282,38 +282,38 @@ def maintain_leases(self): implementing your own policy, you _should_ call this method in an appropriate form of subprocess. """ - # Sanity check: Should this infinitely-recursive loop quit? - if not self._consumer.active: - return - - # Determine the appropriate duration for the lease. - # This is based off of how long previous messages have taken to ack, - # with a sensible default and within the ranges allowed by Pub/Sub. - p99 = self.histogram.percentile(99) - logger.debug('The current p99 value is %d seconds.' % p99) - - # Create a streaming pull request. - # We do not actually call `modify_ack_deadline` over and over because - # it is more efficient to make a single request. - ack_ids = list(self.managed_ack_ids) - logger.debug('Renewing lease for %d ack IDs.' % len(ack_ids)) - if len(ack_ids) > 0 and self._consumer.active: - request = types.StreamingPullRequest( - modify_deadline_ack_ids=ack_ids, - modify_deadline_seconds=[p99] * len(ack_ids), - ) - self._consumer.send_request(request) - - # Now wait an appropriate period of time and do this again. - # - # We determine the appropriate period of time based on a random - # period between 0 seconds and 90% of the lease. This use of - # jitter (http://bit.ly/2s2ekL7) helps decrease contention in cases - # where there are many clients. - snooze = random.uniform(0.0, p99 * 0.9) - logger.debug('Snoozing lease management for %f seconds.' % snooze) - time.sleep(snooze) - self.maintain_leases() + while True: + # Sanity check: Should this infinitely loop quit? 
+ if not self._consumer.active: + return + + # Determine the appropriate duration for the lease. This is + # based off of how long previous messages have taken to ack, with + # a sensible default and within the ranges allowed by Pub/Sub. + p99 = self.histogram.percentile(99) + logger.debug('The current p99 value is %d seconds.' % p99) + + # Create a streaming pull request. + # We do not actually call `modify_ack_deadline` over and over + # because it is more efficient to make a single request. + ack_ids = list(self.managed_ack_ids) + logger.debug('Renewing lease for %d ack IDs.' % len(ack_ids)) + if len(ack_ids) > 0 and self._consumer.active: + request = types.StreamingPullRequest( + modify_deadline_ack_ids=ack_ids, + modify_deadline_seconds=[p99] * len(ack_ids), + ) + self._consumer.send_request(request) + + # Now wait an appropriate period of time and do this again. + # + # We determine the appropriate period of time based on a random + # period between 0 seconds and 90% of the lease. This use of + # jitter (http://bit.ly/2s2ekL7) helps decrease contention in cases + # where there are many clients. + snooze = random.uniform(0.0, p99 * 0.9) + logger.debug('Snoozing lease management for %f seconds.' % snooze) + time.sleep(snooze) def modify_ack_deadline(self, ack_id, seconds): """Modify the ack deadline for the given ack_id. From e6bcbe77e305bccc0ec83febfc115ab408924d9c Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Tue, 22 Aug 2017 14:30:33 -0700 Subject: [PATCH 57/63] Add exception logging in the callback. 
--- .../pubsub_v1/subscriber/_helper_threads.py | 17 +++++++++++------ .../pubsub_v1/subscriber/test_helper_threads.py | 16 ++++++++++++++++ 2 files changed, 27 insertions(+), 6 deletions(-) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/_helper_threads.py b/pubsub/google/cloud/pubsub_v1/subscriber/_helper_threads.py index 42bfab4b4a51..21e812a0d2ad 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/_helper_threads.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/_helper_threads.py @@ -27,7 +27,8 @@ _LOGGER = logging.getLogger(__name__) -_HelperThread = collections.namedtuple('HelperThreads', +_HelperThread = collections.namedtuple( + 'HelperThreads', ['name', 'thread', 'queue'], ) @@ -117,8 +118,12 @@ def __call__(self): if item == STOP: break - # This doesn't presently deal with exceptions that bubble up - # through the callback. If there is an error here, the thread will - # exit and no further queue items will be processed. We could - # potentially capture errors, log them, and then continue on. - self._callback(item) + # Run the callback. If any exceptions occur, log them and + # continue. + try: + self._callback(item) + except Exception as exc: + _LOGGER.error('{class_}: {message}'.format( + class_=exc.__class__.__name__, + message=str(exc), + )) diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_helper_threads.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_helper_threads.py index 0ac36bcdf583..84775f0be2c1 100644 --- a/pubsub/tests/unit/pubsub_v1/subscriber/test_helper_threads.py +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_helper_threads.py @@ -107,3 +107,19 @@ def test_queue_callback_thread(): # Assert that we got the expected calls. 
assert get.call_count == 2 callback.assert_called_once_with(mock.sentinel.A) + + +def test_queue_callback_thread_exception(): + queue_ = queue.Queue() + callback = mock.Mock(spec=(), side_effect=(Exception,)) + qct = _helper_threads.QueueCallbackThread(queue_, callback) + + # Set up an appropriate mock for the queue, and call the queue callback + # thread. + with mock.patch.object(queue.Queue, 'get') as get: + get.side_effect = (mock.sentinel.A, _helper_threads.STOP) + qct() + + # Assert that we got the expected calls. + assert get.call_count == 2 + callback.assert_called_once_with(mock.sentinel.A) From b9115ca359903b7578aa75c1aa5011681cc44f4f Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Wed, 23 Aug 2017 07:30:19 -0700 Subject: [PATCH 58/63] Fix a long line. Whups. --- pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py b/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py index 146a8c3be899..15fab1c5bb91 100644 --- a/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py +++ b/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py @@ -173,8 +173,8 @@ def _commit(self): self.messages, ) end = time.time() - logging.getLogger().debug('gRPC Publish took {sec} seconds.'.format( - sec=end - start, + logging.getLogger().debug('gRPC Publish took {s} seconds.'.format( + s=end - start, )) # We got a response from Pub/Sub; denote that we are processing. From 6ae46cb668cd1c0e8df309360fe18108c6dedbee Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Wed, 23 Aug 2017 10:08:45 -0700 Subject: [PATCH 59/63] Accept an executor. 
--- .../cloud/pubsub_v1/subscriber/policy/thread.py | 10 ++++++++-- .../unit/pubsub_v1/subscriber/test_policy_thread.py | 13 +++++++++++-- 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py index 359d3880f67e..2147ae499fa6 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py @@ -36,7 +36,8 @@ class Policy(base.BasePolicy): This consumer handles the connection to the Pub/Sub service and all of the concurrency needs. """ - def __init__(self, client, subscription, flow_control=types.FlowControl()): + def __init__(self, client, subscription, flow_control=types.FlowControl(), + executor=None): """Instantiate the policy. Args: @@ -47,6 +48,9 @@ def __init__(self, client, subscription, flow_control=types.FlowControl()): ``projects/{project}/subscriptions/{subscription}``. flow_control (~.pubsub_v1.types.FlowControl): The flow control settings. + executor (~concurrent.futures.ThreadPoolExecutor): A + ThreadPoolExecutor instance, or anything duck-type compatible + with it. """ # Default the callback to a no-op; it is provided by `.open`. self._callback = lambda message: None @@ -63,7 +67,9 @@ def __init__(self, client, subscription, flow_control=types.FlowControl()): # Also maintain a request queue and an executor. 
logger.debug('Creating callback requests thread (not starting).') - self._executor = futures.ThreadPoolExecutor(max_workers=10) + self._executor = executor + if self._executor is None: + self._executor = futures.ThreadPoolExecutor(max_workers=10) self._callback_requests = _helper_threads.QueueCallbackThread( self._request_queue, self.on_callback_request, diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py index 397f47264788..6210ea392315 100644 --- a/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py @@ -12,6 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import absolute_import + +from concurrent import futures import threading import grpc @@ -28,10 +31,10 @@ from google.cloud.pubsub_v1.subscriber.policy import thread -def create_policy(): +def create_policy(**kwargs): creds = mock.Mock(spec=credentials.Credentials) client = subscriber.Client(credentials=creds) - return thread.Policy(client, 'sub_name_c') + return thread.Policy(client, 'sub_name_c', **kwargs) def test_init(): @@ -39,6 +42,12 @@ def test_init(): policy._callback(None) +def test_init_with_executor(): + executor = futures.ThreadPoolExecutor(max_workers=25) + policy = create_policy(executor=executor) + assert policy._executor is executor + + def test_close(): policy = create_policy() consumer = policy._consumer From 852438ed125b8bb3d261d47976d30080eec0d8f4 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Wed, 23 Aug 2017 10:13:59 -0700 Subject: [PATCH 60/63] Fix a minor flake8 complaint. 
--- pubsub/tests/system.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pubsub/tests/system.py b/pubsub/tests/system.py index 046b288bd9f3..02666eae676a 100644 --- a/pubsub/tests/system.py +++ b/pubsub/tests/system.py @@ -91,7 +91,7 @@ def test_subscribe_to_messages(): # Actually open the subscription and hold it open for a few seconds. subscription.open(callback) - for second in range(0,10): + for second in range(0, 10): time.sleep(1) # The callback should have fired at least fifty times, but it From 13c22055aac4780d6dcd4f0773d658e7b7e314ce Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Wed, 23 Aug 2017 13:10:39 -0700 Subject: [PATCH 61/63] No longer need to use inf for exception timeout. --- pubsub/google/cloud/pubsub_v1/publisher/futures.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/pubsub/google/cloud/pubsub_v1/publisher/futures.py b/pubsub/google/cloud/pubsub_v1/publisher/futures.py index dff4aa234947..cbc67d9e55c3 100644 --- a/pubsub/google/cloud/pubsub_v1/publisher/futures.py +++ b/pubsub/google/cloud/pubsub_v1/publisher/futures.py @@ -106,10 +106,6 @@ def exception(self, timeout=None, _wait=1): Returns: Exception: The exception raised by the call, if any. """ - # If no timeout was specified, use inf. - if timeout is None: - timeout = float('inf') - # Wait until the future is done. if not self._completed.wait(timeout=timeout): raise exceptions.TimeoutError('Timed out waiting for result.') From 6a03f480d6df57c8cc579913dd49750c3beaa030 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Wed, 23 Aug 2017 13:24:41 -0700 Subject: [PATCH 62/63] Fixes discussed in chat with @jonparrott. 
--- .../cloud/pubsub_v1/publisher/batch/base.py | 22 ++++++++++--------- .../cloud/pubsub_v1/publisher/batch/thread.py | 6 ++--- .../pubsub_v1/subscriber/policy/thread.py | 17 +++++++++----- .../pubsub_v1/publisher/batch/test_base.py | 7 +++--- .../pubsub_v1/publisher/batch/test_thread.py | 7 +++--- .../subscriber/test_policy_thread.py | 3 ++- 6 files changed, 35 insertions(+), 27 deletions(-) diff --git a/pubsub/google/cloud/pubsub_v1/publisher/batch/base.py b/pubsub/google/cloud/pubsub_v1/publisher/batch/base.py index 68dc9c2850ec..61eea2bb9ad5 100644 --- a/pubsub/google/cloud/pubsub_v1/publisher/batch/base.py +++ b/pubsub/google/cloud/pubsub_v1/publisher/batch/base.py @@ -15,6 +15,7 @@ from __future__ import absolute_import import abc +import enum import six @@ -104,7 +105,7 @@ def will_accept(self, message): bool: Whether this batch can accept the message. """ # If this batch is not accepting messages generally, return False. - if self.status != self.Status.ACCEPTING_MESSAGES: + if self.status != BatchStatus.ACCEPTING_MESSAGES: return False # If this batch can not hold the message in question, return False. @@ -133,13 +134,14 @@ def publish(self, message): """ raise NotImplementedError - class Status(object): - """An enum class representing valid statuses for a batch. - It is acceptable for a class to use a status that is not on this - class; this represents the list of statuses where the existing - library hooks in functionality. - """ - ACCEPTING_MESSAGES = 'accepting messages' - ERROR = 'error' - SUCCESS = 'success' +class BatchStatus(object): + """An enum-like class representing valid statuses for a batch. + + It is acceptable for a class to use a status that is not on this + class; this represents the list of statuses where the existing + library hooks in functionality. 
+ """ + ACCEPTING_MESSAGES = 'accepting messages' + ERROR = 'error' + SUCCESS = 'success' diff --git a/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py b/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py index 15fab1c5bb91..f5c08a76f315 100644 --- a/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py +++ b/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py @@ -67,7 +67,7 @@ def __init__(self, client, topic, settings, autocommit=True): self._messages = [] self._size = 0 self._settings = settings - self._status = self.Status.ACCEPTING_MESSAGES + self._status = base.BatchStatus.ACCEPTING_MESSAGES self._topic = topic # If max latency is specified, start a thread to monitor the batch and @@ -155,7 +155,7 @@ def _commit(self): with self._commit_lock: # If, in the intervening period, the batch started to be committed, # or completed a commit, then no-op at this point. - if self._status != self.Status.ACCEPTING_MESSAGES: + if self._status != base.BatchStatus.ACCEPTING_MESSAGES: return # Update the status. @@ -192,7 +192,7 @@ def _commit(self): # Iterate over the futures on the queue and return the response # IDs. We are trusting that there is a 1:1 mapping, and raise an # exception if not. - self._status = self.Status.SUCCESS + self._status = base.BatchStatus.SUCCESS for message_id, future in zip(response.message_ids, self._futures): future.set_result(message_id) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py index 2147ae499fa6..e83a93e287a9 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py @@ -15,7 +15,7 @@ from __future__ import absolute_import from concurrent import futures -import queue +from queue import Queue import logging import threading @@ -37,7 +37,7 @@ class Policy(base.BasePolicy): the concurrency needs. 
""" def __init__(self, client, subscription, flow_control=types.FlowControl(), - executor=None): + executor=None, queue=None): """Instantiate the policy. Args: @@ -48,15 +48,20 @@ def __init__(self, client, subscription, flow_control=types.FlowControl(), ``projects/{project}/subscriptions/{subscription}``. flow_control (~.pubsub_v1.types.FlowControl): The flow control settings. - executor (~concurrent.futures.ThreadPoolExecutor): A + executor (~concurrent.futures.ThreadPoolExecutor): (Optional.) A ThreadPoolExecutor instance, or anything duck-type compatible with it. + queue (~queue.Queue): (Optional.) A Queue instance, appropriate + for crossing the concurrency boundary implemented by + ``executor``. """ # Default the callback to a no-op; it is provided by `.open`. self._callback = lambda message: None # Create a queue for keeping track of shared state. - self._request_queue = queue.Queue() + if queue is None: + queue = Queue() + self._request_queue = queue # Call the superclass constructor. super(Policy, self).__init__( @@ -67,9 +72,9 @@ def __init__(self, client, subscription, flow_control=types.FlowControl(), # Also maintain a request queue and an executor.
logger.debug('Creating callback requests thread (not starting).') + if executor is None: + executor = futures.ThreadPoolExecutor(max_workers=10) self._executor = executor - if self._executor is None: - self._executor = futures.ThreadPoolExecutor(max_workers=10) self._callback_requests = _helper_threads.QueueCallbackThread( self._request_queue, self.on_callback_request, diff --git a/pubsub/tests/unit/pubsub_v1/publisher/batch/test_base.py b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_base.py index 289238b694b5..05a749d58425 100644 --- a/pubsub/tests/unit/pubsub_v1/publisher/batch/test_base.py +++ b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_base.py @@ -19,6 +19,7 @@ from google.auth import credentials from google.cloud.pubsub_v1 import publisher from google.cloud.pubsub_v1 import types +from google.cloud.pubsub_v1.publisher.batch.base import BatchStatus from google.cloud.pubsub_v1.publisher.batch.thread import Batch @@ -41,14 +42,14 @@ def create_batch(status=None, settings=types.BatchSettings()): def test_len(): - batch = create_batch(status=Batch.Status.ACCEPTING_MESSAGES) + batch = create_batch(status=BatchStatus.ACCEPTING_MESSAGES) assert len(batch) == 0 batch.publish(types.PubsubMessage(data=b'foo')) assert len(batch) == 1 def test_will_accept(): - batch = create_batch(status=Batch.Status.ACCEPTING_MESSAGES) + batch = create_batch(status=BatchStatus.ACCEPTING_MESSAGES) message = types.PubsubMessage() assert batch.will_accept(message) is True @@ -62,7 +63,7 @@ def test_will_not_accept_status(): def test_will_not_accept_size(): batch = create_batch( settings=types.BatchSettings(max_bytes=10), - status=Batch.Status.ACCEPTING_MESSAGES, + status=BatchStatus.ACCEPTING_MESSAGES, ) message = types.PubsubMessage(data=b'abcdefghijklmnopqrstuvwxyz') assert batch.will_accept(message) is False diff --git a/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread.py b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread.py index 1e11f86b6c0b..00b761f52b96 100644 
--- a/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread.py +++ b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread.py @@ -17,12 +17,11 @@ import mock -import pytest - from google.auth import credentials from google.cloud.pubsub_v1 import publisher from google.cloud.pubsub_v1 import types from google.cloud.pubsub_v1.publisher import exceptions +from google.cloud.pubsub_v1.publisher.batch.base import BatchStatus from google.cloud.pubsub_v1.publisher.batch.thread import Batch @@ -61,7 +60,7 @@ def test_init(): Thread.assert_called_once_with(target=batch.monitor) # New batches start able to accept messages by default. - assert batch.status == batch.Status.ACCEPTING_MESSAGES + assert batch.status == BatchStatus.ACCEPTING_MESSAGES def test_init_infinite_latency(): @@ -87,7 +86,7 @@ def test_commit(): # The batch's status needs to be something other than "accepting messages", # since the commit started. - assert batch.status != batch.Status.ACCEPTING_MESSAGES + assert batch.status != BatchStatus.ACCEPTING_MESSAGES def test_blocking_commit(): diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py index 6210ea392315..76aec184815e 100644 --- a/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_thread.py @@ -15,6 +15,7 @@ from __future__ import absolute_import from concurrent import futures +import queue import threading import grpc @@ -44,7 +45,7 @@ def test_init(): def test_init_with_executor(): executor = futures.ThreadPoolExecutor(max_workers=25) - policy = create_policy(executor=executor) + policy = create_policy(executor=executor, queue=queue.Queue()) assert policy._executor is executor From f1dde8fdde4275c7628d6850b03785a7f5dbcc98 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Wed, 23 Aug 2017 15:48:32 -0700 Subject: [PATCH 63/63] Pub/Sub Docs (#3849) --- docs/index.rst | 2 +- docs/pubsub/client.rst | 6 - 
docs/pubsub/iam.rst | 7 - docs/pubsub/index.rst | 117 ++ docs/pubsub/message.rst | 6 - docs/pubsub/publisher/api/batch.rst | 8 + docs/pubsub/publisher/api/client.rst | 6 + docs/pubsub/publisher/index.rst | 126 ++ docs/pubsub/snippets.py | 483 -------- docs/pubsub/subscriber/api/client.rst | 6 + docs/pubsub/subscriber/api/message.rst | 5 + docs/pubsub/subscriber/api/policy.rst | 5 + docs/pubsub/subscriber/index.rst | 123 ++ docs/pubsub/subscription.rst | 7 - docs/pubsub/topic.rst | 7 - docs/pubsub/types.rst | 5 + docs/pubsub/usage.rst | 245 ---- .../gapic/pubsub/v1/subscriber_client.py | 8 +- .../cloud/proto/pubsub/v1/pubsub_pb2.py | 1065 +++++++++++++++-- .../cloud/proto/pubsub/v1/pubsub_pb2_grpc.py | 48 + .../cloud/pubsub_v1/publisher/client.py | 15 +- .../cloud/pubsub_v1/publisher/exceptions.py | 9 +- .../cloud/pubsub_v1/subscriber/message.py | 11 +- .../pubsub_v1/subscriber/policy/thread.py | 6 +- pubsub/google/cloud/pubsub_v1/types.py | 7 +- 25 files changed, 1467 insertions(+), 866 deletions(-) delete mode 100644 docs/pubsub/client.rst delete mode 100644 docs/pubsub/iam.rst create mode 100644 docs/pubsub/index.rst delete mode 100644 docs/pubsub/message.rst create mode 100644 docs/pubsub/publisher/api/batch.rst create mode 100644 docs/pubsub/publisher/api/client.rst create mode 100644 docs/pubsub/publisher/index.rst delete mode 100644 docs/pubsub/snippets.py create mode 100644 docs/pubsub/subscriber/api/client.rst create mode 100644 docs/pubsub/subscriber/api/message.rst create mode 100644 docs/pubsub/subscriber/api/policy.rst create mode 100644 docs/pubsub/subscriber/index.rst delete mode 100644 docs/pubsub/subscription.rst delete mode 100644 docs/pubsub/topic.rst create mode 100644 docs/pubsub/types.rst delete mode 100644 docs/pubsub/usage.rst diff --git a/docs/index.rst b/docs/index.rst index ee47a2ac378f..d9afe5f31af4 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -8,7 +8,7 @@ datastore/usage dns/usage language/usage - pubsub/usage + pubsub/index 
resource-manager/api runtimeconfig/usage spanner/usage diff --git a/docs/pubsub/client.rst b/docs/pubsub/client.rst deleted file mode 100644 index 2745c1d808ee..000000000000 --- a/docs/pubsub/client.rst +++ /dev/null @@ -1,6 +0,0 @@ -Pub/Sub Client -============== - -.. automodule:: google.cloud.pubsub.client - :members: - :show-inheritance: diff --git a/docs/pubsub/iam.rst b/docs/pubsub/iam.rst deleted file mode 100644 index 26943762605b..000000000000 --- a/docs/pubsub/iam.rst +++ /dev/null @@ -1,7 +0,0 @@ -IAM Policy -~~~~~~~~~~ - -.. automodule:: google.cloud.pubsub.iam - :members: - :member-order: bysource - :show-inheritance: diff --git a/docs/pubsub/index.rst b/docs/pubsub/index.rst new file mode 100644 index 000000000000..7b7438b29f9c --- /dev/null +++ b/docs/pubsub/index.rst @@ -0,0 +1,117 @@ +####### +Pub/Sub +####### + +`Google Cloud Pub/Sub`_ is a fully-managed real-time messaging service that +allows you to send and receive messages between independent applications. You +can leverage Cloud Pub/Sub’s flexibility to decouple systems and components +hosted on Google Cloud Platform or elsewhere on the Internet. By building on +the same technology Google uses, Cloud Pub/Sub is designed to provide “at +least once” delivery at low latency with on-demand scalability to 1 million +messages per second (and beyond). + +.. _Google Cloud Pub/Sub: https://cloud.google.com/pubsub/ + +******************************** +Authentication and Configuration +******************************** + +- For an overview of authentication in ``google-cloud-python``, + see :doc:`/core/auth`. + +- In addition to any authentication configuration, you should also set the + :envvar:`GOOGLE_CLOUD_PROJECT` environment variable for the project you'd + like to interact with. If the :envvar:`GOOGLE_CLOUD_PROJECT` environment + variable is not present, the project ID from JSON file credentials is used. 
+ + If you are using Google App Engine + this will be detected automatically. + +- After configuring your environment, create a + :class:`~google.cloud.pubsub_v1.PublisherClient` or + :class:`~google.cloud.pubsub_v1.SubscriberClient`. + +.. code-block:: python + + >>> from google.cloud import pubsub + >>> publisher = pubsub.PublisherClient() + >>> subscriber = pubsub.SubscriberClient() + +or pass in ``credentials`` explicitly. + +.. code-block:: python + + >>> from google.cloud import pubsub + >>> client = pubsub.PublisherClient( + ... credentials=creds, + ... ) + +********** +Publishing +********** + +To publish data to Cloud Pub/Sub you must create a topic, and then publish +messages to it + +.. code-block:: python + + >>> import os + >>> from google.cloud import pubsub + >>> + >>> publisher = pubsub.PublisherClient() + >>> topic = 'projects/{project_id}/topics/{topic}'.format( + ... project_id=os.getenv('GOOGLE_CLOUD_PROJECT'), + ... topic='MY_TOPIC_NAME', # Set this to something appropriate. + ... ) + >>> publisher.create_topic(topic) + >>> publisher.publish(topic, b'My first message!', spam='eggs') + +To learn more, consult the :doc:`publishing documentation `. + + +*********** +Subscribing +*********** + +To subscribe to data in Cloud Pub/Sub, you create a subscription based on +the topic, and subscribe to that. + +.. code-block:: python + + >>> import os + >>> from google.cloud import pubsub + >>> + >>> subscriber = pubsub.SubscriberClient() + >>> topic = 'projects/{project_id}/topics/{topic}'.format( + ... project_id=os.getenv('GOOGLE_CLOUD_PROJECT'), + ... topic='MY_TOPIC_NAME', # Set this to something appropriate. + ... ) + >>> subscription_name = 'projects/{project_id}/subscriptions/{sub}'.format( + ... project_id=os.getenv('GOOGLE_CLOUD_PROJECT'), + ... sub='MY_SUBSCRIPTION_NAME', # Set this to something appropriate. + ...
) + >>> subscription = subscriber.create_subscription(topic, subscription_name) + +The subscription is opened asynchronously, and messages are processed by +use of a callback. + +.. code-block:: python + + >>> def callback(message): + ... print(message.data) + ... message.ack() + >>> subscription.open(callback) + +To learn more, consult the :doc:`subscriber documentation `. + + +********** +Learn More +********** + +.. toctree:: + :maxdepth: 3 + + publisher/index + subscriber/index + types diff --git a/docs/pubsub/message.rst b/docs/pubsub/message.rst deleted file mode 100644 index 654c607d46b3..000000000000 --- a/docs/pubsub/message.rst +++ /dev/null @@ -1,6 +0,0 @@ -Message -~~~~~~~ - -.. automodule:: google.cloud.pubsub.message - :members: - :show-inheritance: diff --git a/docs/pubsub/publisher/api/batch.rst b/docs/pubsub/publisher/api/batch.rst new file mode 100644 index 000000000000..5846d3ff9416 --- /dev/null +++ b/docs/pubsub/publisher/api/batch.rst @@ -0,0 +1,8 @@ +:orphan: + +Batch API +========= + +.. automodule:: google.cloud.pubsub_v1.publisher.batch.thread + :members: + :inherited-members: diff --git a/docs/pubsub/publisher/api/client.rst b/docs/pubsub/publisher/api/client.rst new file mode 100644 index 000000000000..47a3aa3d5d7a --- /dev/null +++ b/docs/pubsub/publisher/api/client.rst @@ -0,0 +1,6 @@ +Publisher Client API +==================== + +.. automodule:: google.cloud.pubsub_v1.publisher.client + :members: + :inherited-members: diff --git a/docs/pubsub/publisher/index.rst b/docs/pubsub/publisher/index.rst new file mode 100644 index 000000000000..72b374b588a3 --- /dev/null +++ b/docs/pubsub/publisher/index.rst @@ -0,0 +1,126 @@ +Publishing Messages +=================== + +Publishing messages is handled through the +:class:`~.pubsub_v1.publisher.client.Client` class (aliased as +``google.cloud.pubsub.PublisherClient``).
This class provides methods to +create topics, and (most importantly) a +:meth:`~.pubsub_v1.publisher.client.Client.publish` method that publishes +messages to Pub/Sub. + +Instantiating a publishing client is straightforward: + +.. code-block:: python + + from google.cloud import pubsub + publish_client = pubsub.PublisherClient() + + +Publish a Message +----------------- + +To publish a message, use the +:meth:`~.pubsub_v1.publisher.client.Client.publish` method. This method accepts +two positional arguments: the topic to publish to, and the body of the message. +It also accepts arbitrary keyword arguments, which are passed along as +attributes of the message. + +The topic is passed along as a string; all topics have the canonical form of +``projects/{project_name}/topics/{topic_name}``. + +Therefore, a very basic publishing call looks like: + +.. code-block:: python + + topic = 'projects/{project}/topics/{topic}' + publish_client.publish(topic, b'This is my message.') + +.. note:: + + The message data in Pub/Sub is an opaque blob of bytes, and as such, you + *must* send a ``bytes`` object in Python 3 (``str`` object in Python 2). + If you send a text string (``str`` in Python 3, ``unicode`` in Python 2), + the method will raise :exc:`TypeError`. + + The reason it works this way is because there is no reasonable guarantee + that the same language or environment is being used by the subscriber, + and so it is the responsibility of the publisher to properly encode + the payload. + +If you want to include attributes, simply add keyword arguments: + +.. code-block:: python + + topic = 'projects/{project}/topics/{topic}' + publish_client.publish(topic, b'This is my message.', foo='bar') + + +Batching +-------- + +Whenever you publish a message, a +:class:`~.pubsub_v1.publisher.batch.thread.Batch` is automatically created. +This way, if you publish a large volume of messages, it reduces the number of +requests made to the server. 
+ +The way that this works is that on the first message that you send, a new +:class:`~.pubsub_v1.publisher.batch.thread.Batch` is created automatically. +For every subsequent message, if there is already a valid batch that is still +accepting messages, then that batch is used. When the batch is created, it +begins a countdown that publishes the batch once sufficient time has +elapsed (by default, this is 0.05 seconds). + +If you need different batching settings, simply provide a +:class:`~.pubsub_v1.types.BatchSettings` object when you instantiate the +:class:`~.pubsub_v1.publisher.client.Client`: + +.. code-block:: python + + from google.cloud import pubsub + from google.cloud.pubsub import types + + client = pubsub.PublisherClient( + batch_settings=BatchSettings(max_messages=500), + ) + +Pub/Sub accepts a maximum of 1,000 messages in a batch, and the size of a +batch can not exceed 10 megabytes. + + +Futures +------- + +Every call to :meth:`~.pubsub_v1.publisher.client.Client.publish` will return +a class that conforms to the :class:`~concurrent.futures.Future` interface. +You can use this to ensure that the publish succeeded: + +.. code-block:: python + + # The .result() method will block until the future is complete. + # If there is an error, it will raise an exception. + future = client.publish(topic, b'My awesome message.') + message_id = future.result() + +You can also attach a callback to the future: + +.. code-block:: python + + # Callbacks receive the future as their only argument, as defined in + # the Future interface. + def callback(future): + message_id = future.result() + do_something_with(message_id) + + # The callback is added once you get the future. If you add a callback + # and the future is already done, it will simply be executed immediately. + future = client.publish(topic, b'My awesome message.') + future.add_done_callback(callback) + + +API Reference +------------- + +.. 
toctree:: + :maxdepth: 2 + + api/client diff --git a/docs/pubsub/snippets.py b/docs/pubsub/snippets.py deleted file mode 100644 index 96eea175c0cd..000000000000 --- a/docs/pubsub/snippets.py +++ /dev/null @@ -1,483 +0,0 @@ -# Copyright 2016 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Testable usage examples for Google Cloud Pubsub API wrapper - -Each example function takes a ``client`` argument (which must be an instance -of :class:`google.cloud.pubsub.client.Client`) and uses it to perform a task -with the API. - -To facilitate running the examples as system tests, each example is also passed -a ``to_delete`` list; the function adds to the list any objects created which -need to be deleted during teardown. 
-""" - -import time - -from google.cloud.pubsub.client import Client - - -def snippet(func): - """Mark ``func`` as a snippet example function.""" - func._snippet = True - return func - - -def _millis(): - return time.time() * 1000 - - -@snippet -def client_list_topics(client, to_delete): # pylint: disable=unused-argument - """List topics for a project.""" - - def do_something_with(sub): # pylint: disable=unused-argument - pass - - # [START client_list_topics] - for topic in client.list_topics(): # API request(s) - do_something_with(topic) - # [END client_list_topics] - - -@snippet -def client_list_subscriptions(client, - to_delete): # pylint: disable=unused-argument - """List all subscriptions for a project.""" - - def do_something_with(sub): # pylint: disable=unused-argument - pass - - # [START client_list_subscriptions] - for subscription in client.list_subscriptions(): # API request(s) - do_something_with(subscription) - # [END client_list_subscriptions] - - -@snippet -def client_topic(client, to_delete): # pylint: disable=unused-argument - """Topic factory.""" - TOPIC_NAME = 'topic_factory-%d' % (_millis(),) - - # [START client_topic] - topic = client.topic(TOPIC_NAME) - # [END client_topic] - - -@snippet -def client_subscription(client, to_delete): # pylint: disable=unused-argument - """Subscription factory.""" - SUBSCRIPTION_NAME = 'subscription_factory-%d' % (_millis(),) - - # [START client_subscription] - subscription = client.subscription( - SUBSCRIPTION_NAME, ack_deadline=60, - retain_acked_messages=True) - # [END client_subscription] - - -@snippet -def topic_create(client, to_delete): - """Create a topic.""" - TOPIC_NAME = 'topic_create-%d' % (_millis(),) - - # [START topic_create] - topic = client.topic(TOPIC_NAME) - topic.create() # API request - # [END topic_create] - - to_delete.append(topic) - - -@snippet -def topic_exists(client, to_delete): - """Test existence of a topic.""" - TOPIC_NAME = 'topic_exists-%d' % (_millis(),) - topic = 
client.topic(TOPIC_NAME) - to_delete.append(topic) - - # [START topic_exists] - assert not topic.exists() # API request - topic.create() # API request - assert topic.exists() # API request - # [END topic_exists] - - -@snippet -def topic_delete(client, to_delete): # pylint: disable=unused-argument - """Delete a topic.""" - TOPIC_NAME = 'topic_delete-%d' % (_millis(),) - topic = client.topic(TOPIC_NAME) - topic.create() # API request - - # [START topic_delete] - assert topic.exists() # API request - topic.delete() - assert not topic.exists() # API request - # [END topic_delete] - - -@snippet -def topic_iam_policy(client, to_delete): - """Fetch / set a topic's IAM policy.""" - TOPIC_NAME = 'topic_iam_policy-%d' % (_millis(),) - topic = client.topic(TOPIC_NAME) - topic.create() - to_delete.append(topic) - - # [START topic_get_iam_policy] - policy = topic.get_iam_policy() # API request - # [END topic_get_iam_policy] - - assert len(policy.viewers) == 0 - assert len(policy.editors) == 0 - assert len(policy.owners) == 0 - - # [START topic_set_iam_policy] - ALL_USERS = policy.all_users() - policy.viewers = [ALL_USERS] - LOGS_GROUP = policy.group('cloud-logs@google.com') - policy.editors = [LOGS_GROUP] - new_policy = topic.set_iam_policy(policy) # API request - # [END topic_set_iam_policy] - - assert ALL_USERS in new_policy.viewers - assert LOGS_GROUP in new_policy.editors - - -# @snippet # Disabled due to #1687 -def topic_check_iam_permissions(client, to_delete): - """Check topic IAM permissions.""" - TOPIC_NAME = 'topic_check_iam_permissions-%d' % (_millis(),) - topic = client.topic(TOPIC_NAME) - topic.create() - to_delete.append(topic) - - # [START topic_check_iam_permissions] - from google.cloud.pubsub.iam import OWNER_ROLE, EDITOR_ROLE, VIEWER_ROLE - TO_CHECK = [OWNER_ROLE, EDITOR_ROLE, VIEWER_ROLE] - ALLOWED = topic.check_iam_permissions(TO_CHECK) - assert set(ALLOWED) == set(TO_CHECK) - # [END topic_check_iam_permissions] - - -@snippet -def 
topic_publish_messages(client, to_delete): - """Publish messages to a topic.""" - TOPIC_NAME = 'topic_publish_messages-%d' % (_millis(),) - topic = client.topic(TOPIC_NAME) - topic.create() - to_delete.append(topic) - - # [START topic_publish_simple_message] - topic.publish(b'This is the message payload') # API request - # [END topic_publish_simple_message] - - # [START topic_publish_message_with_attrs] - topic.publish(b'Another message payload', extra='EXTRA') # API request - # [END topic_publish_message_with_attrs] - - -@snippet -def topic_subscription(client, to_delete): - """Create subscriptions to a topic.""" - TOPIC_NAME = 'topic_subscription-%d' % (_millis(),) - SUB_DEFAULTS = 'topic_subscription-defaults-%d' % (_millis(),) - SUB_ACK90 = 'topic_subscription-ack90-%d' % (_millis(),) - topic = client.topic(TOPIC_NAME) - topic.create() - to_delete.append(topic) - - # [START topic_subscription_defaults] - sub_defaults = topic.subscription(SUB_DEFAULTS) - # [END topic_subscription_defaults] - - sub_defaults.create() # API request - to_delete.append(sub_defaults) - expected_names = set() - expected_names.add(sub_defaults.full_name) - - # [START topic_subscription_ack90] - sub_ack90 = topic.subscription(SUB_ACK90, ack_deadline=90) - # [END topic_subscription_ack90] - - sub_ack90.create() # API request - to_delete.append(sub_ack90) - expected_names.add(sub_ack90.full_name) - - sub_names = set() - - def do_something_with(sub): - sub_names.add(sub.full_name) - - # [START topic_list_subscriptions] - for subscription in topic.list_subscriptions(): # API request(s) - do_something_with(subscription) - # [END topic_list_subscriptions] - - assert sub_names.issuperset(expected_names) - - -# @snippet: disabled, because push-mode requires a validated endpoint URL -def topic_subscription_push(client, to_delete): - """Create subscriptions to a topic.""" - TOPIC_NAME = 'topic_subscription_push-%d' % (_millis(),) - SUB_PUSH = 'topic_subscription_push-sub-%d' % (_millis(),) - 
PUSH_URL = 'https://api.example.com/push-endpoint' - topic = client.topic(TOPIC_NAME) - topic.create() - to_delete.append(topic) - - # [START topic_subscription_push] - subscription = topic.subscription(SUB_PUSH, push_endpoint=PUSH_URL) - subscription.create() # API request - # [END topic_subscription_push] - - # [START subscription_push_pull] - subscription.modify_push_configuration(push_endpoint=None) # API request - # [END subscription_push_pull] - - # [START subscription_pull_push] - subscription.modify_push_configuration( - push_endpoint=PUSH_URL) # API request - # [END subscription_pull_push] - - -@snippet -def subscription_lifecycle(client, to_delete): - """Test lifecycle of a subscription.""" - TOPIC_NAME = 'subscription_lifecycle-%d' % (_millis(),) - SUB_NAME = 'subscription_lifecycle-defaults-%d' % (_millis(),) - topic = client.topic(TOPIC_NAME) - topic.create() - to_delete.append(topic) - - # [START subscription_create] - subscription = topic.subscription(SUB_NAME) - subscription.create() # API request - # [END subscription_create] - - # [START subscription_exists] - assert subscription.exists() # API request - # [END subscription_exists] - - # [START subscription_reload] - subscription.reload() # API request - # [END subscription_reload] - - # [START subscription_delete] - subscription.delete() # API request - # [END subscription_delete] - - -@snippet -def subscription_pull(client, to_delete): - """Pull messges from a subscribed topic.""" - TOPIC_NAME = 'subscription_pull-%d' % (_millis(),) - SUB_NAME = 'subscription_pull-defaults-%d' % (_millis(),) - PAYLOAD1 = b'PAYLOAD1' - PAYLOAD2 = b'PAYLOAD2' - EXTRA = 'EXTRA' - topic = client.topic(TOPIC_NAME) - topic.create() - to_delete.append(topic) - - subscription = topic.subscription(SUB_NAME) - subscription.create() - to_delete.append(subscription) - - # [START subscription_pull_return_immediately] - pulled = subscription.pull(return_immediately=True) - # [END subscription_pull_return_immediately] - assert 
len(pulled) == 0, "unexpected message" - - topic.publish(PAYLOAD1) - topic.publish(PAYLOAD2, extra=EXTRA) - - time.sleep(1) # eventually-consistent - - # [START subscription_pull] - pulled = subscription.pull(max_messages=2) - # [END subscription_pull] - - assert len(pulled) == 2, "eventual consistency" - - # [START subscription_modify_ack_deadline] - for ack_id, _ in pulled: - subscription.modify_ack_deadline(ack_id, 90) # API request - # [END subscription_modify_ack_deadline] - - payloads = [] - extras = [] - - def do_something_with(message): # pylint: disable=unused-argument - payloads.append(message.data) - if message.attributes: - extras.append(message.attributes) - - class ApplicationException(Exception): - pass - - def log_exception(_): - pass - - # [START subscription_acknowledge] - for ack_id, message in pulled: - try: - do_something_with(message) - except ApplicationException as e: - log_exception(e) - else: - subscription.acknowledge([ack_id]) - # [END subscription_acknowledge] - - assert set(payloads) == set([PAYLOAD1, PAYLOAD2]), 'payloads: %s' % ( - (payloads,)) - assert extras == [{'extra': EXTRA}], 'extras: %s' % ( - (extras,)) - - -@snippet -def subscription_pull_w_autoack(client, to_delete): - """Pull messges from a topic, auto-acknowldging them""" - TOPIC_NAME = 'subscription_pull_autoack-%d' % (_millis(),) - SUB_NAME = 'subscription_pull_autoack-defaults-%d' % (_millis(),) - PAYLOAD1 = b'PAYLOAD1' - PAYLOAD2 = b'PAYLOAD2' - EXTRA = 'EXTRA' - topic = client.topic(TOPIC_NAME) - topic.create() - to_delete.append(topic) - - subscription = topic.subscription(SUB_NAME) - subscription.create() - to_delete.append(subscription) - - # [START topic_batch] - with topic.batch() as batch: - batch.publish(PAYLOAD1) - batch.publish(PAYLOAD2, extra=EXTRA) - # [END topic_batch] - - time.sleep(1) # eventually-consistent - - payloads = [] - extras = [] - - def do_something_with(message): # pylint: disable=unused-argument - payloads.append(message.data) - if 
message.attributes: - extras.append(message.attributes) - - # [START subscription_pull_autoack] - from google.cloud.pubsub.subscription import AutoAck - with AutoAck(subscription, max_messages=10) as ack: - for ack_id, message in list(ack.items()): - try: - do_something_with(message) - except Exception: # pylint: disable=broad-except - del ack[ack_id] - # [END subscription_pull_autoack] - - assert set(payloads) == set(PAYLOAD1, PAYLOAD1), "eventual consistency" - assert extras == [{'extra': EXTRA}], "eventual consistency" - - -@snippet -def subscription_iam_policy(client, to_delete): - """Fetch / set a subscription's IAM policy.""" - TOPIC_NAME = 'subscription_iam_policy-%d' % (_millis(),) - SUB_NAME = 'subscription_iam_policy-defaults-%d' % (_millis(),) - topic = client.topic(TOPIC_NAME) - topic.create() - to_delete.append(topic) - - subscription = topic.subscription(SUB_NAME) - subscription.create() - to_delete.append(subscription) - - # [START subscription_get_iam_policy] - policy = subscription.get_iam_policy() # API request - # [END subscription_get_iam_policy] - - assert len(policy.viewers) == 0 - assert len(policy.editors) == 0 - assert len(policy.owners) == 0 - - # [START subscription_set_iam_policy] - ALL_USERS = policy.all_users() - policy.viewers = [ALL_USERS] - LOGS_GROUP = policy.group('cloud-logs@google.com') - policy.editors = [LOGS_GROUP] - new_policy = subscription.set_iam_policy(policy) # API request - # [END subscription_set_iam_policy] - - assert ALL_USERS in new_policy.viewers - assert LOGS_GROUP in new_policy.editors - - -# @snippet # Disabled due to #1687 -def subscription_check_iam_permissions(client, to_delete): - """Check subscription IAM permissions.""" - TOPIC_NAME = 'subscription_check_iam_permissions-%d' % (_millis(),) - SUB_NAME = 'subscription_check_iam_permissions-defaults-%d' % (_millis(),) - topic = client.topic(TOPIC_NAME) - topic.create() - to_delete.append(topic) - - subscription = topic.subscription(SUB_NAME) - 
subscription.create() - to_delete.append(subscription) - - # [START subscription_check_iam_permissions] - from google.cloud.pubsub.iam import OWNER_ROLE, EDITOR_ROLE, VIEWER_ROLE - TO_CHECK = [OWNER_ROLE, EDITOR_ROLE, VIEWER_ROLE] - ALLOWED = subscription.check_iam_permissions(TO_CHECK) - assert set(ALLOWED) == set(TO_CHECK) - # [END subscription_check_iam_permissions] - - -def _line_no(func): - code = getattr(func, '__code__', None) or getattr(func, 'func_code') - return code.co_firstlineno - - -def _find_examples(): - funcs = [obj for obj in globals().values() - if getattr(obj, '_snippet', False)] - for func in sorted(funcs, key=_line_no): - yield func - - -def _name_and_doc(func): - return func.__name__, func.__doc__ - - -def main(): - client = Client() - for example in _find_examples(): - to_delete = [] - print('%-25s: %s' % _name_and_doc(example)) - try: - example(client, to_delete) - except AssertionError as e: - print(' FAIL: %s' % (e,)) - except Exception as e: # pylint: disable=broad-except - print(' ERROR: %r' % (e,)) - for item in to_delete: - item.delete() - - -if __name__ == '__main__': - main() diff --git a/docs/pubsub/subscriber/api/client.rst b/docs/pubsub/subscriber/api/client.rst new file mode 100644 index 000000000000..965880c5a640 --- /dev/null +++ b/docs/pubsub/subscriber/api/client.rst @@ -0,0 +1,6 @@ +Subscriber Client API +===================== + +.. automodule:: google.cloud.pubsub_v1.subscriber.client + :members: + :inherited-members: diff --git a/docs/pubsub/subscriber/api/message.rst b/docs/pubsub/subscriber/api/message.rst new file mode 100644 index 000000000000..d6566f4c363e --- /dev/null +++ b/docs/pubsub/subscriber/api/message.rst @@ -0,0 +1,5 @@ +Messages +======== + +.. 
autoclass:: google.cloud.pubsub_v1.subscriber.message.Message + :members: ack, attributes, data, nack, publish_time diff --git a/docs/pubsub/subscriber/api/policy.rst b/docs/pubsub/subscriber/api/policy.rst new file mode 100644 index 000000000000..95d288d0b974 --- /dev/null +++ b/docs/pubsub/subscriber/api/policy.rst @@ -0,0 +1,5 @@ +Subscriptions +============= + +.. autoclass:: google.cloud.pubsub_v1.subscriber.policy.thread.Policy + :members: open, close diff --git a/docs/pubsub/subscriber/index.rst b/docs/pubsub/subscriber/index.rst new file mode 100644 index 000000000000..be32a9e9ed97 --- /dev/null +++ b/docs/pubsub/subscriber/index.rst @@ -0,0 +1,123 @@ +Subscribing to Messages +======================= + +Subscribing to messages is handled through the +:class:`~.pubsub_v1.subscriber.client.Client` class (aliased as +``google.cloud.pubsub.SubscriberClient``). This class provides a +:meth:`~.pubsub_v1.subscriber.client.Client.subscribe` method to +attach to subscriptions on existing topics, and (most importantly) a +:meth:`~.pubsub_v1.subscriber.policy.thread.Policy.open` method that +consumes messages from Pub/Sub. + +Instantiating a subscriber client is straightforward: + +.. code-block:: python + + from google.cloud import pubsub + subscriber = pubsub.SubscriberClient() + + +Creating a Subscription +----------------------- + +In Pub/Sub, a **subscription** is a discrete pull of messages from a topic. +If multiple clients pull the same subscription, then messages are split +between them. If multiple clients create a subscription each, then each client +will get every message. + +.. note:: + + Remember that Pub/Sub operates under the principle of "everything at least + once". Even in the case where multiple clients pull the same subscription, + *some* redundancy is likely. + +Creating a subscription requires that you already know what topic you want +to subscribe to, and it must already exist. Once you have that, it is easy: + +.. 
code-block:: python + + # Substitute {project}, {topic}, and {subscription} with appropriate + # values for your application. + topic_name = 'projects/{project}/topics/{topic}' + sub_name = 'projects/{project}/subscriptions/{subscription}' + subscriber.create_subscription(topic_name, sub_name) + + +Pulling a Subscription +---------------------- + +Once you have created a subscription (or if you already had one), the next +step is to pull data from it. This entails two steps: first you must call +:meth:`~.pubsub_v1.subscriber.client.Client.subscribe`, passing in the +subscription string. + +.. code-block:: python + + # As before, substitute {project} and {subscription} with appropriate + # values for your application. + subscription = subscriber.subscribe( + 'projects/{project}/subscriptions/{subscription}', + ) + +This will return an object with an +:meth:`~.pubsub_v1.subscriber.policy.thread.Policy.open` method; calling +this method will actually begin consumption of the subscription. + + +Subscription Callbacks +---------------------- + +Because subscriptions in this Pub/Sub client are opened asynchronously, +processing the messages that are yielded by the subscription is handled +through **callbacks**. + +The basic idea: Define a function that takes one argument; this argument +will be a :class:`~.pubsub_v1.subscriber.message.Message` instance. This +function should do whatever processing is necessary. At the end, the +function should :meth:`~.pubsub_v1.subscriber.message.Message.ack` the +message. + +When you call :meth:`~.pubsub_v1.subscriber.policy.thread.Policy.open`, you +must pass the callback that will be used. + +Here is an example: + +.. code-block:: python + + # Define the callback. + # Note that the callback is defined *before* the subscription is opened. + def callback(message): + do_something_with(message)  # Replace this with your actual logic. + message.ack() + + # Open the subscription, passing the callback. 
+ subscription.open(callback) + +Explaining Ack +-------------- + +In Pub/Sub, the term **ack** stands for "acknowledge". You should ack a +message when your processing of that message *has completed*. When you ack +a message, you are telling Pub/Sub that you do not need to see it again. + +It might be tempting to ack messages immediately on receipt. While there +are valid use cases for this, in general it is unwise. The reason why: If +there is some error or edge case in your processing logic, and processing +of the message fails, you will have already told Pub/Sub that you successfully +processed the message. By contrast, if you ack only upon completion, then +Pub/Sub will eventually re-deliver the unacknowledged message. + +It is also possible to **nack** a message, which is the opposite. When you +nack, it tells Pub/Sub that you are unable or unwilling to deal with the +message, and that the service should redeliver it. + + +API Reference +------------- + +.. toctree:: + :maxdepth: 2 + + api/client + api/policy + api/message diff --git a/docs/pubsub/subscription.rst b/docs/pubsub/subscription.rst deleted file mode 100644 index f242cb644e83..000000000000 --- a/docs/pubsub/subscription.rst +++ /dev/null @@ -1,7 +0,0 @@ -Subscriptions -~~~~~~~~~~~~~ - -.. automodule:: google.cloud.pubsub.subscription - :members: - :member-order: bysource - :show-inheritance: diff --git a/docs/pubsub/topic.rst b/docs/pubsub/topic.rst deleted file mode 100644 index 323d467a08ce..000000000000 --- a/docs/pubsub/topic.rst +++ /dev/null @@ -1,7 +0,0 @@ -Topics -~~~~~~ - -.. automodule:: google.cloud.pubsub.topic - :members: - :member-order: bysource - :show-inheritance: diff --git a/docs/pubsub/types.rst b/docs/pubsub/types.rst new file mode 100644 index 000000000000..87c987571766 --- /dev/null +++ b/docs/pubsub/types.rst @@ -0,0 +1,5 @@ +Pub/Sub Client Types +==================== + +.. 
automodule:: google.cloud.pubsub_v1.types + :members: diff --git a/docs/pubsub/usage.rst b/docs/pubsub/usage.rst deleted file mode 100644 index 96727e654835..000000000000 --- a/docs/pubsub/usage.rst +++ /dev/null @@ -1,245 +0,0 @@ -Pub / Sub -========= - - -.. toctree:: - :maxdepth: 2 - :hidden: - - client - topic - subscription - message - iam - -Authentication / Configuration ------------------------------- - -- Use :class:`Client ` objects to configure - your applications. - -- In addition to any authentication configuration, you should also set the - :envvar:`GOOGLE_CLOUD_PROJECT` environment variable for the project you'd like - to interact with. If you are Google App Engine or Google Compute Engine - this will be detected automatically. - -- The library now enables the ``gRPC`` transport for the pubsub API by - default, assuming that the required dependencies are installed and - importable. To *disable* this transport, set the - :envvar:`GOOGLE_CLOUD_DISABLE_GRPC` environment variable to a - non-empty string, e.g.: ``$ export GOOGLE_CLOUD_DISABLE_GRPC=true``. - -- :class:`Client ` objects hold both a ``project`` - and an authenticated connection to the PubSub service. - -- The authentication credentials can be implicitly determined from the - environment or directly via - :meth:`from_service_account_json ` - and - :meth:`from_service_account_p12 `. - -- After setting ``GOOGLE_APPLICATION_CREDENTIALS`` and ``GOOGLE_CLOUD_PROJECT`` - environment variables, create a :class:`Client ` - - .. code-block:: python - - >>> from google.cloud import pubsub - >>> client = pubsub.Client() - - -Manage topics for a project ---------------------------- - -List topics for the default project: - -.. literalinclude:: snippets.py - :start-after: [START client_list_topics] - :end-before: [END client_list_topics] - -Create a new topic for the default project: - -.. 
literalinclude:: snippets.py - :start-after: [START topic_create] - :end-before: [END topic_create] - -Check for the existence of a topic: - -.. literalinclude:: snippets.py - :start-after: [START topic_exists] - :end-before: [END topic_exists] - -Delete a topic: - -.. literalinclude:: snippets.py - :start-after: [START topic_delete] - :end-before: [END topic_delete] - -Fetch the IAM policy for a topic: - -.. literalinclude:: snippets.py - :start-after: [START topic_get_iam_policy] - :end-before: [END topic_get_iam_policy] - -Update the IAM policy for a topic: - -.. literalinclude:: snippets.py - :start-after: [START topic_set_iam_policy] - :end-before: [END topic_set_iam_policy] - -Test permissions allowed by the current IAM policy on a topic: - -.. literalinclude:: snippets.py - :start-after: [START topic_check_iam_permissions] - :end-before: [END topic_check_iam_permissions] - - -Publish messages to a topic ---------------------------- - -Publish a single message to a topic, without attributes: - -.. literalinclude:: snippets.py - :start-after: [START topic_publish_simple_message] - :end-before: [END topic_publish_simple_message] - -Publish a single message to a topic, with attributes: - -.. literalinclude:: snippets.py - :start-after: [START topic_publish_message_with_attrs] - :end-before: [END topic_publish_message_with_attrs] - -Publish a set of messages to a topic (as a single request): - -.. literalinclude:: snippets.py - :start-after: [START topic_batch] - :end-before: [END topic_batch] - -.. note:: - - The only API request happens during the ``__exit__()`` of the topic - used as a context manager, and only if the block exits without raising - an exception. - - -Manage subscriptions to topics ------------------------------- - -List all subscriptions for the default project: - -.. literalinclude:: snippets.py - :start-after: [START client_list_subscriptions] - :end-before: [END client_list_subscriptions] - -List subscriptions for a topic: - -.. 
literalinclude:: snippets.py - :start-after: [START topic_list_subscriptions] - :end-before: [END topic_list_subscriptions] - -Create a new pull subscription for a topic, with defaults: - -.. literalinclude:: snippets.py - :start-after: [START topic_subscription_defaults] - :end-before: [END topic_subscription_defaults] - -Create a new pull subscription for a topic with a non-default ACK deadline: - -.. literalinclude:: snippets.py - :start-after: [START topic_subscription_ack90] - :end-before: [END topic_subscription_ack90] - -Create a new push subscription for a topic: - -.. literalinclude:: snippets.py - :start-after: [START topic_subscription_push] - :end-before: [END topic_subscription_push] - -Check for the existence of a subscription: - -.. literalinclude:: snippets.py - :start-after: [START subscription_exists] - :end-before: [END subscription_exists] - -Convert a pull subscription to push: - -.. literalinclude:: snippets.py - :start-after: [START subscription_pull_push] - :end-before: [END subscription_pull_push] - -Convert a push subscription to pull: - -.. literalinclude:: snippets.py - :start-after: [START subscription_push_pull] - :end-before: [END subscription_push_pull] - -Re-synchronize a subscription with the back-end: - -.. literalinclude:: snippets.py - :start-after: [START subscription_reload] - :end-before: [END subscription_reload] - -Fetch the IAM policy for a subscription - -.. literalinclude:: snippets.py - :start-after: [START subscription_get_iam_policy] - :end-before: [END subscription_get_iam_policy] - -Update the IAM policy for a subscription: - -.. literalinclude:: snippets.py - :start-after: [START subscription_set_iam_policy] - :end-before: [END subscription_set_iam_policy] - -Test permissions allowed by the current IAM policy on a subscription: - -.. literalinclude:: snippets.py - :start-after: [START subscription_check_iam_permissions] - :end-before: [END subscription_check_iam_permissions] - -Delete a subscription: - -.. 
literalinclude:: snippets.py - :start-after: [START subscription_delete] - :end-before: [END subscription_delete] - - -Pull messages from a subscription ---------------------------------- - -Fetch pending messages for a pull subscription: - -.. literalinclude:: snippets.py - :start-after: [START subscription_pull] - :end-before: [END subscription_pull] - -Note that received messages must be acknowledged, or else the back-end -will re-send them later: - -.. literalinclude:: snippets.py - :start-after: [START subscription_acknowledge] - :end-before: [END subscription_acknowledge] - -Fetch messages for a pull subscription without blocking (none pending): - -.. literalinclude:: snippets.py - :start-after: [START subscription_pull_return_immediately] - :end-before: [END subscription_pull_return_immediately] - -Update the acknowlegement deadline for pulled messages: - -.. literalinclude:: snippets.py - :start-after: [START subscription_modify_ack_deadline] - :end-before: [END subscription_modify_ack_deadline] - -Fetch pending messages, acknowledging those whose processing doesn't raise an -error: - -.. literalinclude:: snippets.py - :start-after: [START subscription_pull_autoack] - :end-before: [END subscription_pull_autoack] - -.. note:: - - The ``pull`` API request occurs at entry to the ``with`` block, and the - ``acknowlege`` API request occurs at the end, passing only the ``ack_ids`` - which haven't been deleted from ``ack`` diff --git a/pubsub/google/cloud/gapic/pubsub/v1/subscriber_client.py b/pubsub/google/cloud/gapic/pubsub/v1/subscriber_client.py index ab8233824595..5313e0d941a1 100644 --- a/pubsub/google/cloud/gapic/pubsub/v1/subscriber_client.py +++ b/pubsub/google/cloud/gapic/pubsub/v1/subscriber_client.py @@ -861,16 +861,14 @@ def create_snapshot(self, name, subscription, options=None): Format is ``projects/{project}/snapshots/{snap}``. subscription (string): The subscription whose backlog the snapshot retains. 
Specifically, the created snapshot is guaranteed to retain: - (a) The existing backlog on the subscription. More precisely, this is - :: + - The existing backlog on the subscription. More precisely, this is defined as the messages in the subscription's backlog that are unacknowledged upon the successful completion of the `CreateSnapshot` request; as well as: - (b) Any messages published to the subscription's topic following the - :: - + - Any messages published to the subscription's topic following the successful completion of the CreateSnapshot request. + Format is ``projects/{project}/subscriptions/{sub}``. options (:class:`google.gax.CallOptions`): Overrides the default settings for this call, e.g, timeout, retries etc. diff --git a/pubsub/google/cloud/proto/pubsub/v1/pubsub_pb2.py b/pubsub/google/cloud/proto/pubsub/v1/pubsub_pb2.py index 07919f8c5646..aeee99e182d0 100644 --- a/pubsub/google/cloud/proto/pubsub/v1/pubsub_pb2.py +++ b/pubsub/google/cloud/proto/pubsub/v1/pubsub_pb2.py @@ -24,7 +24,7 @@ name='google/cloud/proto/pubsub/v1/pubsub.proto', package='google.pubsub.v1', syntax='proto3', - serialized_pb=_b('\n)google/cloud/proto/pubsub/v1/pubsub.proto\x12\x10google.pubsub.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\x15\n\x05Topic\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xdb\x01\n\rPubsubMessage\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\x12\x43\n\nattributes\x18\x02 \x03(\x0b\x32/.google.pubsub.v1.PubsubMessage.AttributesEntry\x12\x12\n\nmessage_id\x18\x03 \x01(\t\x12\x30\n\x0cpublish_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1a\x31\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\" \n\x0fGetTopicRequest\x12\r\n\x05topic\x18\x01 \x01(\t\"R\n\x0ePublishRequest\x12\r\n\x05topic\x18\x01 \x01(\t\x12\x31\n\x08messages\x18\x02 
\x03(\x0b\x32\x1f.google.pubsub.v1.PubsubMessage\"&\n\x0fPublishResponse\x12\x13\n\x0bmessage_ids\x18\x01 \x03(\t\"K\n\x11ListTopicsRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"V\n\x12ListTopicsResponse\x12\'\n\x06topics\x18\x01 \x03(\x0b\x32\x17.google.pubsub.v1.Topic\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"U\n\x1dListTopicSubscriptionsRequest\x12\r\n\x05topic\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"P\n\x1eListTopicSubscriptionsResponse\x12\x15\n\rsubscriptions\x18\x01 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"#\n\x12\x44\x65leteTopicRequest\x12\r\n\x05topic\x18\x01 \x01(\t\"\xda\x01\n\x0cSubscription\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05topic\x18\x02 \x01(\t\x12\x31\n\x0bpush_config\x18\x04 \x01(\x0b\x32\x1c.google.pubsub.v1.PushConfig\x12\x1c\n\x14\x61\x63k_deadline_seconds\x18\x05 \x01(\x05\x12\x1d\n\x15retain_acked_messages\x18\x07 \x01(\x08\x12=\n\x1amessage_retention_duration\x18\x08 \x01(\x0b\x32\x19.google.protobuf.Duration\"\x98\x01\n\nPushConfig\x12\x15\n\rpush_endpoint\x18\x01 \x01(\t\x12@\n\nattributes\x18\x02 \x03(\x0b\x32,.google.pubsub.v1.PushConfig.AttributesEntry\x1a\x31\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"S\n\x0fReceivedMessage\x12\x0e\n\x06\x61\x63k_id\x18\x01 \x01(\t\x12\x30\n\x07message\x18\x02 \x01(\x0b\x32\x1f.google.pubsub.v1.PubsubMessage\".\n\x16GetSubscriptionRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\"\x82\x01\n\x19UpdateSubscriptionRequest\x12\x34\n\x0csubscription\x18\x01 \x01(\x0b\x32\x1e.google.pubsub.v1.Subscription\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\"R\n\x18ListSubscriptionsRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"k\n\x19ListSubscriptionsResponse\x12\x35\n\rsubscriptions\x18\x01 
\x03(\x0b\x32\x1e.google.pubsub.v1.Subscription\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"1\n\x19\x44\x65leteSubscriptionRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\"b\n\x17ModifyPushConfigRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12\x31\n\x0bpush_config\x18\x02 \x01(\x0b\x32\x1c.google.pubsub.v1.PushConfig\"U\n\x0bPullRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12\x1a\n\x12return_immediately\x18\x02 \x01(\x08\x12\x14\n\x0cmax_messages\x18\x03 \x01(\x05\"L\n\x0cPullResponse\x12<\n\x11received_messages\x18\x01 \x03(\x0b\x32!.google.pubsub.v1.ReceivedMessage\"_\n\x18ModifyAckDeadlineRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x63k_ids\x18\x04 \x03(\t\x12\x1c\n\x14\x61\x63k_deadline_seconds\x18\x03 \x01(\x05\";\n\x12\x41\x63knowledgeRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x63k_ids\x18\x02 \x03(\t\"\xa4\x01\n\x14StreamingPullRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x63k_ids\x18\x02 \x03(\t\x12\x1f\n\x17modify_deadline_seconds\x18\x03 \x03(\x05\x12\x1f\n\x17modify_deadline_ack_ids\x18\x04 \x03(\t\x12#\n\x1bstream_ack_deadline_seconds\x18\x05 \x01(\x05\"U\n\x15StreamingPullResponse\x12<\n\x11received_messages\x18\x01 \x03(\x0b\x32!.google.pubsub.v1.ReceivedMessage\";\n\x15\x43reateSnapshotRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0csubscription\x18\x02 \x01(\t\"X\n\x08Snapshot\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05topic\x18\x02 \x01(\t\x12/\n\x0b\x65xpire_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"N\n\x14ListSnapshotsRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"_\n\x15ListSnapshotsResponse\x12-\n\tsnapshots\x18\x01 \x03(\x0b\x32\x1a.google.pubsub.v1.Snapshot\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\")\n\x15\x44\x65leteSnapshotRequest\x12\x10\n\x08snapshot\x18\x01 \x01(\t\"m\n\x0bSeekRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12*\n\x04time\x18\x02 
\x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12\x12\n\x08snapshot\x18\x03 \x01(\tH\x00\x42\x08\n\x06target\"\x0e\n\x0cSeekResponse2\xe8\x0f\n\nSubscriber\x12\x86\x01\n\x12\x43reateSubscription\x12\x1e.google.pubsub.v1.Subscription\x1a\x1e.google.pubsub.v1.Subscription\"0\x82\xd3\xe4\x93\x02*\x1a%/v1/{name=projects/*/subscriptions/*}:\x01*\x12\x92\x01\n\x0fGetSubscription\x12(.google.pubsub.v1.GetSubscriptionRequest\x1a\x1e.google.pubsub.v1.Subscription\"5\x82\xd3\xe4\x93\x02/\x12-/v1/{subscription=projects/*/subscriptions/*}\x12\xa0\x01\n\x12UpdateSubscription\x12+.google.pubsub.v1.UpdateSubscriptionRequest\x1a\x1e.google.pubsub.v1.Subscription\"=\x82\xd3\xe4\x93\x02\x37\x32\x32/v1/{subscription.name=projects/*/subscriptions/*}:\x01*\x12\x9c\x01\n\x11ListSubscriptions\x12*.google.pubsub.v1.ListSubscriptionsRequest\x1a+.google.pubsub.v1.ListSubscriptionsResponse\".\x82\xd3\xe4\x93\x02(\x12&/v1/{project=projects/*}/subscriptions\x12\x90\x01\n\x12\x44\x65leteSubscription\x12+.google.pubsub.v1.DeleteSubscriptionRequest\x1a\x16.google.protobuf.Empty\"5\x82\xd3\xe4\x93\x02/*-/v1/{subscription=projects/*/subscriptions/*}\x12\xa3\x01\n\x11ModifyAckDeadline\x12*.google.pubsub.v1.ModifyAckDeadlineRequest\x1a\x16.google.protobuf.Empty\"J\x82\xd3\xe4\x93\x02\x44\"?/v1/{subscription=projects/*/subscriptions/*}:modifyAckDeadline:\x01*\x12\x91\x01\n\x0b\x41\x63knowledge\x12$.google.pubsub.v1.AcknowledgeRequest\x1a\x16.google.protobuf.Empty\"D\x82\xd3\xe4\x93\x02>\"9/v1/{subscription=projects/*/subscriptions/*}:acknowledge:\x01*\x12\x84\x01\n\x04Pull\x12\x1d.google.pubsub.v1.PullRequest\x1a\x1e.google.pubsub.v1.PullResponse\"=\x82\xd3\xe4\x93\x02\x37\"2/v1/{subscription=projects/*/subscriptions/*}:pull:\x01*\x12\x64\n\rStreamingPull\x12&.google.pubsub.v1.StreamingPullRequest\x1a\'.google.pubsub.v1.StreamingPullResponse(\x01\x30\x01\x12\xa0\x01\n\x10ModifyPushConfig\x12).google.pubsub.v1.ModifyPushConfigRequest\x1a\x16.google.protobuf.Empty\"I\x82\xd3\xe4\x93\x02\x43\">/v1/{sub
scription=projects/*/subscriptions/*}:modifyPushConfig:\x01*\x12\x8c\x01\n\rListSnapshots\x12&.google.pubsub.v1.ListSnapshotsRequest\x1a\'.google.pubsub.v1.ListSnapshotsResponse\"*\x82\xd3\xe4\x93\x02$\x12\"/v1/{project=projects/*}/snapshots\x12\x83\x01\n\x0e\x43reateSnapshot\x12\'.google.pubsub.v1.CreateSnapshotRequest\x1a\x1a.google.pubsub.v1.Snapshot\",\x82\xd3\xe4\x93\x02&\x1a!/v1/{name=projects/*/snapshots/*}:\x01*\x12\x80\x01\n\x0e\x44\x65leteSnapshot\x12\'.google.pubsub.v1.DeleteSnapshotRequest\x1a\x16.google.protobuf.Empty\"-\x82\xd3\xe4\x93\x02\'*%/v1/{snapshot=projects/*/snapshots/*}\x12\x84\x01\n\x04Seek\x12\x1d.google.pubsub.v1.SeekRequest\x1a\x1e.google.pubsub.v1.SeekResponse\"=\x82\xd3\xe4\x93\x02\x37\"2/v1/{subscription=projects/*/subscriptions/*}:seek:\x01*2\x9b\x06\n\tPublisher\x12j\n\x0b\x43reateTopic\x12\x17.google.pubsub.v1.Topic\x1a\x17.google.pubsub.v1.Topic\")\x82\xd3\xe4\x93\x02#\x1a\x1e/v1/{name=projects/*/topics/*}:\x01*\x12\x82\x01\n\x07Publish\x12 .google.pubsub.v1.PublishRequest\x1a!.google.pubsub.v1.PublishResponse\"2\x82\xd3\xe4\x93\x02,\"\'/v1/{topic=projects/*/topics/*}:publish:\x01*\x12o\n\x08GetTopic\x12!.google.pubsub.v1.GetTopicRequest\x1a\x17.google.pubsub.v1.Topic\"\'\x82\xd3\xe4\x93\x02!\x12\x1f/v1/{topic=projects/*/topics/*}\x12\x80\x01\n\nListTopics\x12#.google.pubsub.v1.ListTopicsRequest\x1a$.google.pubsub.v1.ListTopicsResponse\"\'\x82\xd3\xe4\x93\x02!\x12\x1f/v1/{project=projects/*}/topics\x12\xb2\x01\n\x16ListTopicSubscriptions\x12/.google.pubsub.v1.ListTopicSubscriptionsRequest\x1a\x30.google.pubsub.v1.ListTopicSubscriptionsResponse\"5\x82\xd3\xe4\x93\x02/\x12-/v1/{topic=projects/*/topics/*}/subscriptions\x12t\n\x0b\x44\x65leteTopic\x12$.google.pubsub.v1.DeleteTopicRequest\x1a\x16.google.protobuf.Empty\"\'\x82\xd3\xe4\x93\x02!*\x1f/v1/{topic=projects/*/topics/*}By\n\x14\x63om.google.pubsub.v1B\x0bPubsubProtoP\x01Z6google.golang.org/genproto/googleapis/pubsub/v1;pubsub\xf8\x01\x01\xaa\x02\x16Google.Cloud.PubSub.V1b\x06pro
to3') + serialized_pb=_b('\n)google/cloud/proto/pubsub/v1/pubsub.proto\x12\x10google.pubsub.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"y\n\x05Topic\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x33\n\x06labels\x18\x02 \x03(\x0b\x32#.google.pubsub.v1.Topic.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xdb\x01\n\rPubsubMessage\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\x12\x43\n\nattributes\x18\x02 \x03(\x0b\x32/.google.pubsub.v1.PubsubMessage.AttributesEntry\x12\x12\n\nmessage_id\x18\x03 \x01(\t\x12\x30\n\x0cpublish_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1a\x31\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\" \n\x0fGetTopicRequest\x12\r\n\x05topic\x18\x01 \x01(\t\"m\n\x12UpdateTopicRequest\x12&\n\x05topic\x18\x01 \x01(\x0b\x32\x17.google.pubsub.v1.Topic\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\"R\n\x0ePublishRequest\x12\r\n\x05topic\x18\x01 \x01(\t\x12\x31\n\x08messages\x18\x02 \x03(\x0b\x32\x1f.google.pubsub.v1.PubsubMessage\"&\n\x0fPublishResponse\x12\x13\n\x0bmessage_ids\x18\x01 \x03(\t\"K\n\x11ListTopicsRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"V\n\x12ListTopicsResponse\x12\'\n\x06topics\x18\x01 \x03(\x0b\x32\x17.google.pubsub.v1.Topic\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"U\n\x1dListTopicSubscriptionsRequest\x12\r\n\x05topic\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"P\n\x1eListTopicSubscriptionsResponse\x12\x15\n\rsubscriptions\x18\x01 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"#\n\x12\x44\x65leteTopicRequest\x12\r\n\x05topic\x18\x01 \x01(\t\"\xc5\x02\n\x0cSubscription\x12\x0c\n\x04name\x18\x01 
\x01(\t\x12\r\n\x05topic\x18\x02 \x01(\t\x12\x31\n\x0bpush_config\x18\x04 \x01(\x0b\x32\x1c.google.pubsub.v1.PushConfig\x12\x1c\n\x14\x61\x63k_deadline_seconds\x18\x05 \x01(\x05\x12\x1d\n\x15retain_acked_messages\x18\x07 \x01(\x08\x12=\n\x1amessage_retention_duration\x18\x08 \x01(\x0b\x32\x19.google.protobuf.Duration\x12:\n\x06labels\x18\t \x03(\x0b\x32*.google.pubsub.v1.Subscription.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x98\x01\n\nPushConfig\x12\x15\n\rpush_endpoint\x18\x01 \x01(\t\x12@\n\nattributes\x18\x02 \x03(\x0b\x32,.google.pubsub.v1.PushConfig.AttributesEntry\x1a\x31\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"S\n\x0fReceivedMessage\x12\x0e\n\x06\x61\x63k_id\x18\x01 \x01(\t\x12\x30\n\x07message\x18\x02 \x01(\x0b\x32\x1f.google.pubsub.v1.PubsubMessage\".\n\x16GetSubscriptionRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\"\x82\x01\n\x19UpdateSubscriptionRequest\x12\x34\n\x0csubscription\x18\x01 \x01(\x0b\x32\x1e.google.pubsub.v1.Subscription\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\"R\n\x18ListSubscriptionsRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"k\n\x19ListSubscriptionsResponse\x12\x35\n\rsubscriptions\x18\x01 \x03(\x0b\x32\x1e.google.pubsub.v1.Subscription\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"1\n\x19\x44\x65leteSubscriptionRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\"b\n\x17ModifyPushConfigRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12\x31\n\x0bpush_config\x18\x02 \x01(\x0b\x32\x1c.google.pubsub.v1.PushConfig\"U\n\x0bPullRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12\x1a\n\x12return_immediately\x18\x02 \x01(\x08\x12\x14\n\x0cmax_messages\x18\x03 \x01(\x05\"L\n\x0cPullResponse\x12<\n\x11received_messages\x18\x01 
\x03(\x0b\x32!.google.pubsub.v1.ReceivedMessage\"_\n\x18ModifyAckDeadlineRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x63k_ids\x18\x04 \x03(\t\x12\x1c\n\x14\x61\x63k_deadline_seconds\x18\x03 \x01(\x05\";\n\x12\x41\x63knowledgeRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x63k_ids\x18\x02 \x03(\t\"\xa4\x01\n\x14StreamingPullRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x63k_ids\x18\x02 \x03(\t\x12\x1f\n\x17modify_deadline_seconds\x18\x03 \x03(\x05\x12\x1f\n\x17modify_deadline_ack_ids\x18\x04 \x03(\t\x12#\n\x1bstream_ack_deadline_seconds\x18\x05 \x01(\x05\"U\n\x15StreamingPullResponse\x12<\n\x11received_messages\x18\x01 \x03(\x0b\x32!.google.pubsub.v1.ReceivedMessage\";\n\x15\x43reateSnapshotRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0csubscription\x18\x02 \x01(\t\"v\n\x15UpdateSnapshotRequest\x12,\n\x08snapshot\x18\x01 \x01(\x0b\x32\x1a.google.pubsub.v1.Snapshot\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\"\xbf\x01\n\x08Snapshot\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05topic\x18\x02 \x01(\t\x12/\n\x0b\x65xpire_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x36\n\x06labels\x18\x04 \x03(\x0b\x32&.google.pubsub.v1.Snapshot.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"N\n\x14ListSnapshotsRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"_\n\x15ListSnapshotsResponse\x12-\n\tsnapshots\x18\x01 \x03(\x0b\x32\x1a.google.pubsub.v1.Snapshot\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\")\n\x15\x44\x65leteSnapshotRequest\x12\x10\n\x08snapshot\x18\x01 \x01(\t\"m\n\x0bSeekRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12*\n\x04time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12\x12\n\x08snapshot\x18\x03 
\x01(\tH\x00\x42\x08\n\x06target\"\x0e\n\x0cSeekResponse2\xf7\x10\n\nSubscriber\x12\x86\x01\n\x12\x43reateSubscription\x12\x1e.google.pubsub.v1.Subscription\x1a\x1e.google.pubsub.v1.Subscription\"0\x82\xd3\xe4\x93\x02*\x1a%/v1/{name=projects/*/subscriptions/*}:\x01*\x12\x92\x01\n\x0fGetSubscription\x12(.google.pubsub.v1.GetSubscriptionRequest\x1a\x1e.google.pubsub.v1.Subscription\"5\x82\xd3\xe4\x93\x02/\x12-/v1/{subscription=projects/*/subscriptions/*}\x12\xa0\x01\n\x12UpdateSubscription\x12+.google.pubsub.v1.UpdateSubscriptionRequest\x1a\x1e.google.pubsub.v1.Subscription\"=\x82\xd3\xe4\x93\x02\x37\x32\x32/v1/{subscription.name=projects/*/subscriptions/*}:\x01*\x12\x9c\x01\n\x11ListSubscriptions\x12*.google.pubsub.v1.ListSubscriptionsRequest\x1a+.google.pubsub.v1.ListSubscriptionsResponse\".\x82\xd3\xe4\x93\x02(\x12&/v1/{project=projects/*}/subscriptions\x12\x90\x01\n\x12\x44\x65leteSubscription\x12+.google.pubsub.v1.DeleteSubscriptionRequest\x1a\x16.google.protobuf.Empty\"5\x82\xd3\xe4\x93\x02/*-/v1/{subscription=projects/*/subscriptions/*}\x12\xa3\x01\n\x11ModifyAckDeadline\x12*.google.pubsub.v1.ModifyAckDeadlineRequest\x1a\x16.google.protobuf.Empty\"J\x82\xd3\xe4\x93\x02\x44\"?/v1/{subscription=projects/*/subscriptions/*}:modifyAckDeadline:\x01*\x12\x91\x01\n\x0b\x41\x63knowledge\x12$.google.pubsub.v1.AcknowledgeRequest\x1a\x16.google.protobuf.Empty\"D\x82\xd3\xe4\x93\x02>\"9/v1/{subscription=projects/*/subscriptions/*}:acknowledge:\x01*\x12\x84\x01\n\x04Pull\x12\x1d.google.pubsub.v1.PullRequest\x1a\x1e.google.pubsub.v1.PullResponse\"=\x82\xd3\xe4\x93\x02\x37\"2/v1/{subscription=projects/*/subscriptions/*}:pull:\x01*\x12\x64\n\rStreamingPull\x12&.google.pubsub.v1.StreamingPullRequest\x1a\'.google.pubsub.v1.StreamingPullResponse(\x01\x30\x01\x12\xa0\x01\n\x10ModifyPushConfig\x12).google.pubsub.v1.ModifyPushConfigRequest\x1a\x16.google.protobuf.Empty\"I\x82\xd3\xe4\x93\x02\x43\">/v1/{subscription=projects/*/subscriptions/*}:modifyPushConfig:\x01*\x12\x8c\x01\n\rLis
tSnapshots\x12&.google.pubsub.v1.ListSnapshotsRequest\x1a\'.google.pubsub.v1.ListSnapshotsResponse\"*\x82\xd3\xe4\x93\x02$\x12\"/v1/{project=projects/*}/snapshots\x12\x83\x01\n\x0e\x43reateSnapshot\x12\'.google.pubsub.v1.CreateSnapshotRequest\x1a\x1a.google.pubsub.v1.Snapshot\",\x82\xd3\xe4\x93\x02&\x1a!/v1/{name=projects/*/snapshots/*}:\x01*\x12\x8c\x01\n\x0eUpdateSnapshot\x12\'.google.pubsub.v1.UpdateSnapshotRequest\x1a\x1a.google.pubsub.v1.Snapshot\"5\x82\xd3\xe4\x93\x02/2*/v1/{snapshot.name=projects/*/snapshots/*}:\x01*\x12\x80\x01\n\x0e\x44\x65leteSnapshot\x12\'.google.pubsub.v1.DeleteSnapshotRequest\x1a\x16.google.protobuf.Empty\"-\x82\xd3\xe4\x93\x02\'*%/v1/{snapshot=projects/*/snapshots/*}\x12\x84\x01\n\x04Seek\x12\x1d.google.pubsub.v1.SeekRequest\x1a\x1e.google.pubsub.v1.SeekResponse\"=\x82\xd3\xe4\x93\x02\x37\"2/v1/{subscription=projects/*/subscriptions/*}:seek:\x01*2\x9a\x07\n\tPublisher\x12j\n\x0b\x43reateTopic\x12\x17.google.pubsub.v1.Topic\x1a\x17.google.pubsub.v1.Topic\")\x82\xd3\xe4\x93\x02#\x1a\x1e/v1/{name=projects/*/topics/*}:\x01*\x12}\n\x0bUpdateTopic\x12$.google.pubsub.v1.UpdateTopicRequest\x1a\x17.google.pubsub.v1.Topic\"/\x82\xd3\xe4\x93\x02)2$/v1/{topic.name=projects/*/topics/*}:\x01*\x12\x82\x01\n\x07Publish\x12 
.google.pubsub.v1.PublishRequest\x1a!.google.pubsub.v1.PublishResponse\"2\x82\xd3\xe4\x93\x02,\"\'/v1/{topic=projects/*/topics/*}:publish:\x01*\x12o\n\x08GetTopic\x12!.google.pubsub.v1.GetTopicRequest\x1a\x17.google.pubsub.v1.Topic\"\'\x82\xd3\xe4\x93\x02!\x12\x1f/v1/{topic=projects/*/topics/*}\x12\x80\x01\n\nListTopics\x12#.google.pubsub.v1.ListTopicsRequest\x1a$.google.pubsub.v1.ListTopicsResponse\"\'\x82\xd3\xe4\x93\x02!\x12\x1f/v1/{project=projects/*}/topics\x12\xb2\x01\n\x16ListTopicSubscriptions\x12/.google.pubsub.v1.ListTopicSubscriptionsRequest\x1a\x30.google.pubsub.v1.ListTopicSubscriptionsResponse\"5\x82\xd3\xe4\x93\x02/\x12-/v1/{topic=projects/*/topics/*}/subscriptions\x12t\n\x0b\x44\x65leteTopic\x12$.google.pubsub.v1.DeleteTopicRequest\x1a\x16.google.protobuf.Empty\"\'\x82\xd3\xe4\x93\x02!*\x1f/v1/{topic=projects/*/topics/*}By\n\x14\x63om.google.pubsub.v1B\x0bPubsubProtoP\x01Z6google.golang.org/genproto/googleapis/pubsub/v1;pubsub\xf8\x01\x01\xaa\x02\x16Google.Cloud.PubSub.V1b\x06proto3') , dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,]) _sym_db.RegisterFileDescriptor(DESCRIPTOR) @@ -32,6 +32,43 @@ +_TOPIC_LABELSENTRY = _descriptor.Descriptor( + name='LabelsEntry', + full_name='google.pubsub.v1.Topic.LabelsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='google.pubsub.v1.Topic.LabelsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='google.pubsub.v1.Topic.LabelsEntry.value', index=1, + number=2, type=9, 
cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=297, + serialized_end=342, +) + _TOPIC = _descriptor.Descriptor( name='Topic', full_name='google.pubsub.v1.Topic', @@ -46,10 +83,17 @@ message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), + _descriptor.FieldDescriptor( + name='labels', full_name='google.pubsub.v1.Topic.labels', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), ], extensions=[ ], - nested_types=[], + nested_types=[_TOPIC_LABELSENTRY, ], enum_types=[ ], options=None, @@ -59,7 +103,7 @@ oneofs=[ ], serialized_start=221, - serialized_end=242, + serialized_end=342, ) @@ -96,8 +140,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=415, - serialized_end=464, + serialized_start=515, + serialized_end=564, ) _PUBSUBMESSAGE = _descriptor.Descriptor( @@ -147,8 +191,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=245, - serialized_end=464, + serialized_start=345, + serialized_end=564, ) @@ -178,8 +222,46 @@ extension_ranges=[], oneofs=[ ], - serialized_start=466, - serialized_end=498, + serialized_start=566, + serialized_end=598, +) + + +_UPDATETOPICREQUEST = _descriptor.Descriptor( + name='UpdateTopicRequest', + full_name='google.pubsub.v1.UpdateTopicRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='topic', 
full_name='google.pubsub.v1.UpdateTopicRequest.topic', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='update_mask', full_name='google.pubsub.v1.UpdateTopicRequest.update_mask', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=600, + serialized_end=709, ) @@ -216,8 +298,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=500, - serialized_end=582, + serialized_start=711, + serialized_end=793, ) @@ -247,8 +329,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=584, - serialized_end=622, + serialized_start=795, + serialized_end=833, ) @@ -292,8 +374,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=624, - serialized_end=699, + serialized_start=835, + serialized_end=910, ) @@ -330,8 +412,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=701, - serialized_end=787, + serialized_start=912, + serialized_end=998, ) @@ -375,8 +457,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=789, - serialized_end=874, + serialized_start=1000, + serialized_end=1085, ) @@ -413,8 +495,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=876, - serialized_end=956, + serialized_start=1087, + serialized_end=1167, ) @@ -444,11 +526,48 @@ extension_ranges=[], oneofs=[ ], - serialized_start=958, - serialized_end=993, + serialized_start=1169, + serialized_end=1204, ) +_SUBSCRIPTION_LABELSENTRY = _descriptor.Descriptor( + name='LabelsEntry', + full_name='google.pubsub.v1.Subscription.LabelsEntry', + 
filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='google.pubsub.v1.Subscription.LabelsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='google.pubsub.v1.Subscription.LabelsEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=297, + serialized_end=342, +) + _SUBSCRIPTION = _descriptor.Descriptor( name='Subscription', full_name='google.pubsub.v1.Subscription', @@ -498,10 +617,17 @@ message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), + _descriptor.FieldDescriptor( + name='labels', full_name='google.pubsub.v1.Subscription.labels', index=6, + number=9, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), ], extensions=[ ], - nested_types=[], + nested_types=[_SUBSCRIPTION_LABELSENTRY, ], enum_types=[ ], options=None, @@ -510,8 +636,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=996, - serialized_end=1214, + serialized_start=1207, + serialized_end=1532, ) @@ -548,8 +674,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=415, - serialized_end=464, + serialized_start=515, + serialized_end=564, ) 
_PUSHCONFIG = _descriptor.Descriptor( @@ -585,8 +711,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1217, - serialized_end=1369, + serialized_start=1535, + serialized_end=1687, ) @@ -623,8 +749,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1371, - serialized_end=1454, + serialized_start=1689, + serialized_end=1772, ) @@ -654,8 +780,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1456, - serialized_end=1502, + serialized_start=1774, + serialized_end=1820, ) @@ -692,8 +818,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1505, - serialized_end=1635, + serialized_start=1823, + serialized_end=1953, ) @@ -737,8 +863,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1637, - serialized_end=1719, + serialized_start=1955, + serialized_end=2037, ) @@ -775,8 +901,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1721, - serialized_end=1828, + serialized_start=2039, + serialized_end=2146, ) @@ -806,8 +932,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1830, - serialized_end=1879, + serialized_start=2148, + serialized_end=2197, ) @@ -844,8 +970,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1881, - serialized_end=1979, + serialized_start=2199, + serialized_end=2297, ) @@ -889,8 +1015,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1981, - serialized_end=2066, + serialized_start=2299, + serialized_end=2384, ) @@ -920,8 +1046,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2068, - serialized_end=2144, + serialized_start=2386, + serialized_end=2462, ) @@ -965,8 +1091,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2146, - serialized_end=2241, + serialized_start=2464, + serialized_end=2559, ) @@ -1003,8 +1129,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2243, - serialized_end=2302, + serialized_start=2561, + serialized_end=2620, ) @@ -1062,8 +1188,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2305, - serialized_end=2469, + serialized_start=2623, + 
serialized_end=2787, ) @@ -1093,8 +1219,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2471, - serialized_end=2556, + serialized_start=2789, + serialized_end=2874, ) @@ -1131,11 +1257,86 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2558, - serialized_end=2617, + serialized_start=2876, + serialized_end=2935, +) + + +_UPDATESNAPSHOTREQUEST = _descriptor.Descriptor( + name='UpdateSnapshotRequest', + full_name='google.pubsub.v1.UpdateSnapshotRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='snapshot', full_name='google.pubsub.v1.UpdateSnapshotRequest.snapshot', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='update_mask', full_name='google.pubsub.v1.UpdateSnapshotRequest.update_mask', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2937, + serialized_end=3055, ) +_SNAPSHOT_LABELSENTRY = _descriptor.Descriptor( + name='LabelsEntry', + full_name='google.pubsub.v1.Snapshot.LabelsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='google.pubsub.v1.Snapshot.LabelsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', 
full_name='google.pubsub.v1.Snapshot.LabelsEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=297, + serialized_end=342, +) + _SNAPSHOT = _descriptor.Descriptor( name='Snapshot', full_name='google.pubsub.v1.Snapshot', @@ -1164,10 +1365,17 @@ message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), + _descriptor.FieldDescriptor( + name='labels', full_name='google.pubsub.v1.Snapshot.labels', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), ], extensions=[ ], - nested_types=[], + nested_types=[_SNAPSHOT_LABELSENTRY, ], enum_types=[ ], options=None, @@ -1176,8 +1384,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2619, - serialized_end=2707, + serialized_start=3058, + serialized_end=3249, ) @@ -1221,8 +1429,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2709, - serialized_end=2787, + serialized_start=3251, + serialized_end=3329, ) @@ -1259,8 +1467,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2789, - serialized_end=2884, + serialized_start=3331, + serialized_end=3426, ) @@ -1290,8 +1498,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2886, - serialized_end=2927, + serialized_start=3428, + serialized_end=3469, ) @@ -1338,8 +1546,8 @@ name='target', full_name='google.pubsub.v1.SeekRequest.target', index=0, containing_type=None, fields=[]), ], - serialized_start=2929, - 
serialized_end=3038, + serialized_start=3471, + serialized_end=3580, ) @@ -1362,17 +1570,23 @@ extension_ranges=[], oneofs=[ ], - serialized_start=3040, - serialized_end=3054, + serialized_start=3582, + serialized_end=3596, ) +_TOPIC_LABELSENTRY.containing_type = _TOPIC +_TOPIC.fields_by_name['labels'].message_type = _TOPIC_LABELSENTRY _PUBSUBMESSAGE_ATTRIBUTESENTRY.containing_type = _PUBSUBMESSAGE _PUBSUBMESSAGE.fields_by_name['attributes'].message_type = _PUBSUBMESSAGE_ATTRIBUTESENTRY _PUBSUBMESSAGE.fields_by_name['publish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_UPDATETOPICREQUEST.fields_by_name['topic'].message_type = _TOPIC +_UPDATETOPICREQUEST.fields_by_name['update_mask'].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK _PUBLISHREQUEST.fields_by_name['messages'].message_type = _PUBSUBMESSAGE _LISTTOPICSRESPONSE.fields_by_name['topics'].message_type = _TOPIC +_SUBSCRIPTION_LABELSENTRY.containing_type = _SUBSCRIPTION _SUBSCRIPTION.fields_by_name['push_config'].message_type = _PUSHCONFIG _SUBSCRIPTION.fields_by_name['message_retention_duration'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION +_SUBSCRIPTION.fields_by_name['labels'].message_type = _SUBSCRIPTION_LABELSENTRY _PUSHCONFIG_ATTRIBUTESENTRY.containing_type = _PUSHCONFIG _PUSHCONFIG.fields_by_name['attributes'].message_type = _PUSHCONFIG_ATTRIBUTESENTRY _RECEIVEDMESSAGE.fields_by_name['message'].message_type = _PUBSUBMESSAGE @@ -1382,7 +1596,11 @@ _MODIFYPUSHCONFIGREQUEST.fields_by_name['push_config'].message_type = _PUSHCONFIG _PULLRESPONSE.fields_by_name['received_messages'].message_type = _RECEIVEDMESSAGE _STREAMINGPULLRESPONSE.fields_by_name['received_messages'].message_type = _RECEIVEDMESSAGE +_UPDATESNAPSHOTREQUEST.fields_by_name['snapshot'].message_type = _SNAPSHOT +_UPDATESNAPSHOTREQUEST.fields_by_name['update_mask'].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK +_SNAPSHOT_LABELSENTRY.containing_type = 
_SNAPSHOT _SNAPSHOT.fields_by_name['expire_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_SNAPSHOT.fields_by_name['labels'].message_type = _SNAPSHOT_LABELSENTRY _LISTSNAPSHOTSRESPONSE.fields_by_name['snapshots'].message_type = _SNAPSHOT _SEEKREQUEST.fields_by_name['time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP _SEEKREQUEST.oneofs_by_name['target'].fields.append( @@ -1394,6 +1612,7 @@ DESCRIPTOR.message_types_by_name['Topic'] = _TOPIC DESCRIPTOR.message_types_by_name['PubsubMessage'] = _PUBSUBMESSAGE DESCRIPTOR.message_types_by_name['GetTopicRequest'] = _GETTOPICREQUEST +DESCRIPTOR.message_types_by_name['UpdateTopicRequest'] = _UPDATETOPICREQUEST DESCRIPTOR.message_types_by_name['PublishRequest'] = _PUBLISHREQUEST DESCRIPTOR.message_types_by_name['PublishResponse'] = _PUBLISHRESPONSE DESCRIPTOR.message_types_by_name['ListTopicsRequest'] = _LISTTOPICSREQUEST @@ -1417,6 +1636,7 @@ DESCRIPTOR.message_types_by_name['StreamingPullRequest'] = _STREAMINGPULLREQUEST DESCRIPTOR.message_types_by_name['StreamingPullResponse'] = _STREAMINGPULLRESPONSE DESCRIPTOR.message_types_by_name['CreateSnapshotRequest'] = _CREATESNAPSHOTREQUEST +DESCRIPTOR.message_types_by_name['UpdateSnapshotRequest'] = _UPDATESNAPSHOTREQUEST DESCRIPTOR.message_types_by_name['Snapshot'] = _SNAPSHOT DESCRIPTOR.message_types_by_name['ListSnapshotsRequest'] = _LISTSNAPSHOTSREQUEST DESCRIPTOR.message_types_by_name['ListSnapshotsResponse'] = _LISTSNAPSHOTSRESPONSE @@ -1425,11 +1645,35 @@ DESCRIPTOR.message_types_by_name['SeekResponse'] = _SEEKRESPONSE Topic = _reflection.GeneratedProtocolMessageType('Topic', (_message.Message,), dict( + + LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict( + DESCRIPTOR = _TOPIC_LABELSENTRY, + __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + # @@protoc_insertion_point(class_scope:google.pubsub.v1.Topic.LabelsEntry) + )) + , DESCRIPTOR = _TOPIC, __module__ = 
'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """A topic resource. + + + Attributes: + name: + The name of the topic. It must have the format + ``"projects/{project}/topics/{topic}"``. ``{topic}`` must + start with a letter, and contain only letters (``[A-Za-z]``), + numbers (``[0-9]``), dashes (``-``), underscores (``_``), + periods (``.``), tildes (``~``), plus (``+``) or percent signs + (``%``). It must be between 3 and 255 characters in length, + and it must not start with ``"goog"``. + labels: + User labels. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.Topic) )) _sym_db.RegisterMessage(Topic) +_sym_db.RegisterMessage(Topic.LabelsEntry) PubsubMessage = _reflection.GeneratedProtocolMessageType('PubsubMessage', (_message.Message,), dict( @@ -1441,6 +1685,28 @@ , DESCRIPTOR = _PUBSUBMESSAGE, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """A message data and its attributes. The message payload must not be + empty; it must contain either a non-empty data field, or at least one + attribute. + + + Attributes: + data: + The message payload. + attributes: + Optional attributes for this message. + message_id: + ID of this message, assigned by the server when the message is + published. Guaranteed to be unique within the topic. This + value may be read by a subscriber that receives a + ``PubsubMessage`` via a ``Pull`` call or a push delivery. It + must not be populated by the publisher in a ``Publish`` call. + publish_time: + The time at which the message was published, populated by the + server when it receives the ``Publish`` call. It must not be + populated by the publisher in a ``Publish`` call. 
+ """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.PubsubMessage) )) _sym_db.RegisterMessage(PubsubMessage) @@ -1449,13 +1715,51 @@ GetTopicRequest = _reflection.GeneratedProtocolMessageType('GetTopicRequest', (_message.Message,), dict( DESCRIPTOR = _GETTOPICREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the GetTopic method. + + + Attributes: + topic: + The name of the topic to get. Format is + ``projects/{project}/topics/{topic}``. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.GetTopicRequest) )) _sym_db.RegisterMessage(GetTopicRequest) +UpdateTopicRequest = _reflection.GeneratedProtocolMessageType('UpdateTopicRequest', (_message.Message,), dict( + DESCRIPTOR = _UPDATETOPICREQUEST, + __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the UpdateTopic method. + + + Attributes: + topic: + The topic to update. + update_mask: + Indicates which fields in the provided topic to update. Must + be specified and non-empty. + """, + # @@protoc_insertion_point(class_scope:google.pubsub.v1.UpdateTopicRequest) + )) +_sym_db.RegisterMessage(UpdateTopicRequest) + PublishRequest = _reflection.GeneratedProtocolMessageType('PublishRequest', (_message.Message,), dict( DESCRIPTOR = _PUBLISHREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the Publish method. + + + Attributes: + topic: + The messages in the request will be published on this topic. + Format is ``projects/{project}/topics/{topic}``. + messages: + The messages to publish. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.PublishRequest) )) _sym_db.RegisterMessage(PublishRequest) @@ -1463,6 +1767,16 @@ PublishResponse = _reflection.GeneratedProtocolMessageType('PublishResponse', (_message.Message,), dict( DESCRIPTOR = _PUBLISHRESPONSE, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Response for the ``Publish`` method. 
+ + + Attributes: + message_ids: + The server-assigned ID of each published message, in the same + order as the messages in the request. IDs are guaranteed to be + unique within the topic. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.PublishResponse) )) _sym_db.RegisterMessage(PublishResponse) @@ -1470,6 +1784,22 @@ ListTopicsRequest = _reflection.GeneratedProtocolMessageType('ListTopicsRequest', (_message.Message,), dict( DESCRIPTOR = _LISTTOPICSREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the ``ListTopics`` method. + + + Attributes: + project: + The name of the cloud project that topics belong to. Format is + ``projects/{project}``. + page_size: + Maximum number of topics to return. + page_token: + The value returned by the last ``ListTopicsResponse``; + indicates that this is a continuation of a prior + ``ListTopics`` call, and that the system should return the + next page of data. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.ListTopicsRequest) )) _sym_db.RegisterMessage(ListTopicsRequest) @@ -1477,6 +1807,18 @@ ListTopicsResponse = _reflection.GeneratedProtocolMessageType('ListTopicsResponse', (_message.Message,), dict( DESCRIPTOR = _LISTTOPICSRESPONSE, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Response for the ``ListTopics`` method. + + + Attributes: + topics: + The resulting topics. + next_page_token: + If not empty, indicates that there may be more topics that + match the request; this value should be passed in a new + ``ListTopicsRequest``. 
+ """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.ListTopicsResponse) )) _sym_db.RegisterMessage(ListTopicsResponse) @@ -1484,6 +1826,22 @@ ListTopicSubscriptionsRequest = _reflection.GeneratedProtocolMessageType('ListTopicSubscriptionsRequest', (_message.Message,), dict( DESCRIPTOR = _LISTTOPICSUBSCRIPTIONSREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the ``ListTopicSubscriptions`` method. + + + Attributes: + topic: + The name of the topic that subscriptions are attached to. + Format is ``projects/{project}/topics/{topic}``. + page_size: + Maximum number of subscription names to return. + page_token: + The value returned by the last + ``ListTopicSubscriptionsResponse``; indicates that this is a + continuation of a prior ``ListTopicSubscriptions`` call, and + that the system should return the next page of data. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.ListTopicSubscriptionsRequest) )) _sym_db.RegisterMessage(ListTopicSubscriptionsRequest) @@ -1491,6 +1849,18 @@ ListTopicSubscriptionsResponse = _reflection.GeneratedProtocolMessageType('ListTopicSubscriptionsResponse', (_message.Message,), dict( DESCRIPTOR = _LISTTOPICSUBSCRIPTIONSRESPONSE, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Response for the ``ListTopicSubscriptions`` method. + + + Attributes: + subscriptions: + The names of the subscriptions that match the request. + next_page_token: + If not empty, indicates that there may be more subscriptions + that match the request; this value should be passed in a new + ``ListTopicSubscriptionsRequest`` to get more subscriptions. 
+ """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.ListTopicSubscriptionsResponse) )) _sym_db.RegisterMessage(ListTopicSubscriptionsResponse) @@ -1498,16 +1868,88 @@ DeleteTopicRequest = _reflection.GeneratedProtocolMessageType('DeleteTopicRequest', (_message.Message,), dict( DESCRIPTOR = _DELETETOPICREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the ``DeleteTopic`` method. + + + Attributes: + topic: + Name of the topic to delete. Format is + ``projects/{project}/topics/{topic}``. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.DeleteTopicRequest) )) _sym_db.RegisterMessage(DeleteTopicRequest) Subscription = _reflection.GeneratedProtocolMessageType('Subscription', (_message.Message,), dict( + + LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict( + DESCRIPTOR = _SUBSCRIPTION_LABELSENTRY, + __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + # @@protoc_insertion_point(class_scope:google.pubsub.v1.Subscription.LabelsEntry) + )) + , DESCRIPTOR = _SUBSCRIPTION, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """A subscription resource. + + + Attributes: + name: + The name of the subscription. It must have the format + ``"projects/{project}/subscriptions/{subscription}"``. + ``{subscription}`` must start with a letter, and contain only + letters (``[A-Za-z]``), numbers (``[0-9]``), dashes (``-``), + underscores (``_``), periods (``.``), tildes (``~``), plus + (``+``) or percent signs (``%``). It must be between 3 and 255 + characters in length, and it must not start with ``"goog"``. + topic: + The name of the topic from which this subscription is + receiving messages. Format is + ``projects/{project}/topics/{topic}``. The value of this field + will be ``_deleted-topic_`` if the topic has been deleted. + push_config: + If push delivery is used with this subscription, this field is + used to configure it. 
An empty ``pushConfig`` signifies that + the subscriber will pull and ack messages using API methods. + ack_deadline_seconds: + This value is the maximum time after a subscriber receives a + message before the subscriber should acknowledge the message. + After message delivery but before the ack deadline expires and + before the message is acknowledged, it is an outstanding + message and will not be delivered again during that time (on a + best-effort basis). For pull subscriptions, this value is + used as the initial value for the ack deadline. To override + this value for a given message, call ``ModifyAckDeadline`` + with the corresponding ``ack_id`` if using pull. The minimum + custom deadline you can specify is 10 seconds. The maximum + custom deadline you can specify is 600 seconds (10 minutes). + If this parameter is 0, a default value of 10 seconds is used. + For push delivery, this value is also used to set the request + timeout for the call to the push endpoint. If the subscriber + never acknowledges the message, the Pub/Sub system will + eventually redeliver the message. + retain_acked_messages: + Indicates whether to retain acknowledged messages. If true, + then messages are not expunged from the subscription's + backlog, even if they are acknowledged, until they fall out of + the ``message_retention_duration`` window. + message_retention_duration: + How long to retain unacknowledged messages in the + subscription's backlog, from the moment a message is + published. If ``retain_acked_messages`` is true, then this + also configures the retention of acknowledged messages, and + thus configures how far back in time a ``Seek`` can be done. + Defaults to 7 days. Cannot be more than 7 days or less than 10 + minutes. + labels: + User labels. 
+ """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.Subscription) )) _sym_db.RegisterMessage(Subscription) +_sym_db.RegisterMessage(Subscription.LabelsEntry) PushConfig = _reflection.GeneratedProtocolMessageType('PushConfig', (_message.Message,), dict( @@ -1519,6 +1961,35 @@ , DESCRIPTOR = _PUSHCONFIG, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Configuration for a push delivery endpoint. + + + Attributes: + push_endpoint: + A URL locating the endpoint to which messages should be + pushed. For example, a Webhook endpoint might use + "https://example.com/push". + attributes: + Endpoint configuration attributes. Every endpoint has a set + of API supported attributes that can be used to control + different aspects of the message delivery. The currently + supported attribute is ``x-goog-version``, which you can use + to change the format of the pushed message. This attribute + indicates the version of the data expected by the endpoint. + This controls the shape of the pushed message (i.e., its + fields and metadata). The endpoint version is based on the + version of the Pub/Sub API. If not present during the + ``CreateSubscription`` call, it will default to the version of + the API used to make such call. If not present during a + ``ModifyPushConfig`` call, its value will not be changed. + ``GetSubscription`` calls will always return a valid version, + even if the subscription was created without this attribute. + The possible values for this attribute are: - ``v1beta1``: + uses the push format defined in the v1beta1 Pub/Sub API. - + ``v1`` or ``v1beta2``: uses the push format defined in the v1 + Pub/Sub API. 
+ """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.PushConfig) )) _sym_db.RegisterMessage(PushConfig) @@ -1527,6 +1998,16 @@ ReceivedMessage = _reflection.GeneratedProtocolMessageType('ReceivedMessage', (_message.Message,), dict( DESCRIPTOR = _RECEIVEDMESSAGE, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """A message and its corresponding acknowledgment ID. + + + Attributes: + ack_id: + This ID can be used to acknowledge the received message. + message: + The message. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.ReceivedMessage) )) _sym_db.RegisterMessage(ReceivedMessage) @@ -1534,6 +2015,15 @@ GetSubscriptionRequest = _reflection.GeneratedProtocolMessageType('GetSubscriptionRequest', (_message.Message,), dict( DESCRIPTOR = _GETSUBSCRIPTIONREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the GetSubscription method. + + + Attributes: + subscription: + The name of the subscription to get. Format is + ``projects/{project}/subscriptions/{sub}``. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.GetSubscriptionRequest) )) _sym_db.RegisterMessage(GetSubscriptionRequest) @@ -1541,6 +2031,17 @@ UpdateSubscriptionRequest = _reflection.GeneratedProtocolMessageType('UpdateSubscriptionRequest', (_message.Message,), dict( DESCRIPTOR = _UPDATESUBSCRIPTIONREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the UpdateSubscription method. + + + Attributes: + subscription: + The updated subscription object. + update_mask: + Indicates which fields in the provided subscription to update. + Must be specified and non-empty. 
+ """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.UpdateSubscriptionRequest) )) _sym_db.RegisterMessage(UpdateSubscriptionRequest) @@ -1548,6 +2049,22 @@ ListSubscriptionsRequest = _reflection.GeneratedProtocolMessageType('ListSubscriptionsRequest', (_message.Message,), dict( DESCRIPTOR = _LISTSUBSCRIPTIONSREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the ``ListSubscriptions`` method. + + + Attributes: + project: + The name of the cloud project that subscriptions belong to. + Format is ``projects/{project}``. + page_size: + Maximum number of subscriptions to return. + page_token: + The value returned by the last ``ListSubscriptionsResponse``; + indicates that this is a continuation of a prior + ``ListSubscriptions`` call, and that the system should return + the next page of data. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.ListSubscriptionsRequest) )) _sym_db.RegisterMessage(ListSubscriptionsRequest) @@ -1555,6 +2072,18 @@ ListSubscriptionsResponse = _reflection.GeneratedProtocolMessageType('ListSubscriptionsResponse', (_message.Message,), dict( DESCRIPTOR = _LISTSUBSCRIPTIONSRESPONSE, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Response for the ``ListSubscriptions`` method. + + + Attributes: + subscriptions: + The subscriptions that match the request. + next_page_token: + If not empty, indicates that there may be more subscriptions + that match the request; this value should be passed in a new + ``ListSubscriptionsRequest`` to get more subscriptions. 
+ """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.ListSubscriptionsResponse) )) _sym_db.RegisterMessage(ListSubscriptionsResponse) @@ -1562,6 +2091,15 @@ DeleteSubscriptionRequest = _reflection.GeneratedProtocolMessageType('DeleteSubscriptionRequest', (_message.Message,), dict( DESCRIPTOR = _DELETESUBSCRIPTIONREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the DeleteSubscription method. + + + Attributes: + subscription: + The subscription to delete. Format is + ``projects/{project}/subscriptions/{sub}``. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.DeleteSubscriptionRequest) )) _sym_db.RegisterMessage(DeleteSubscriptionRequest) @@ -1569,6 +2107,21 @@ ModifyPushConfigRequest = _reflection.GeneratedProtocolMessageType('ModifyPushConfigRequest', (_message.Message,), dict( DESCRIPTOR = _MODIFYPUSHCONFIGREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the ModifyPushConfig method. + + + Attributes: + subscription: + The name of the subscription. Format is + ``projects/{project}/subscriptions/{sub}``. + push_config: + The push configuration for future deliveries. An empty + ``pushConfig`` indicates that the Pub/Sub system should stop + pushing messages from the given subscription and allow + messages to be pulled and acknowledged - effectively pausing + the subscription if ``Pull`` is not called. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.ModifyPushConfigRequest) )) _sym_db.RegisterMessage(ModifyPushConfigRequest) @@ -1576,6 +2129,26 @@ PullRequest = _reflection.GeneratedProtocolMessageType('PullRequest', (_message.Message,), dict( DESCRIPTOR = _PULLREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the ``Pull`` method. + + + Attributes: + subscription: + The subscription from which messages should be pulled. Format + is ``projects/{project}/subscriptions/{sub}``. 
+ return_immediately: + If this field set to true, the system will respond immediately + even if it there are no messages available to return in the + ``Pull`` response. Otherwise, the system may wait (for a + bounded amount of time) until at least one message is + available, rather than returning no messages. The client may + cancel the request if it does not wish to wait any longer for + the response. + max_messages: + The maximum number of messages returned for this request. The + Pub/Sub system may return fewer than the number specified. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.PullRequest) )) _sym_db.RegisterMessage(PullRequest) @@ -1583,6 +2156,18 @@ PullResponse = _reflection.GeneratedProtocolMessageType('PullResponse', (_message.Message,), dict( DESCRIPTOR = _PULLRESPONSE, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Response for the ``Pull`` method. + + + Attributes: + received_messages: + Received Pub/Sub messages. The Pub/Sub system will return zero + messages if there are no more available in the backlog. The + Pub/Sub system may return fewer than the ``maxMessages`` + requested even if there are more messages available in the + backlog. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.PullResponse) )) _sym_db.RegisterMessage(PullResponse) @@ -1590,6 +2175,26 @@ ModifyAckDeadlineRequest = _reflection.GeneratedProtocolMessageType('ModifyAckDeadlineRequest', (_message.Message,), dict( DESCRIPTOR = _MODIFYACKDEADLINEREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the ModifyAckDeadline method. + + + Attributes: + subscription: + The name of the subscription. Format is + ``projects/{project}/subscriptions/{sub}``. + ack_ids: + List of acknowledgment IDs. + ack_deadline_seconds: + The new ack deadline with respect to the time this request was + sent to the Pub/Sub system. 
For example, if the value is 10, + the new ack deadline will expire 10 seconds after the + ``ModifyAckDeadline`` call was made. Specifying zero may + immediately make the message available for another pull + request. The minimum deadline you can specify is 0 seconds. + The maximum deadline you can specify is 600 seconds (10 + minutes). + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.ModifyAckDeadlineRequest) )) _sym_db.RegisterMessage(ModifyAckDeadlineRequest) @@ -1597,6 +2202,19 @@ AcknowledgeRequest = _reflection.GeneratedProtocolMessageType('AcknowledgeRequest', (_message.Message,), dict( DESCRIPTOR = _ACKNOWLEDGEREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the Acknowledge method. + + + Attributes: + subscription: + The subscription whose message is being acknowledged. Format + is ``projects/{project}/subscriptions/{sub}``. + ack_ids: + The acknowledgment ID for the messages being acknowledged that + was returned by the Pub/Sub system in the ``Pull`` response. + Must not be empty. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.AcknowledgeRequest) )) _sym_db.RegisterMessage(AcknowledgeRequest) @@ -1604,6 +2222,55 @@ StreamingPullRequest = _reflection.GeneratedProtocolMessageType('StreamingPullRequest', (_message.Message,), dict( DESCRIPTOR = _STREAMINGPULLREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the ``StreamingPull`` streaming RPC method. This request is + used to establish the initial stream as well as to stream + acknowledgements and ack deadline modifications from the client to the + server. + + + Attributes: + subscription: + The subscription for which to initialize the new stream. This + must be provided in the first request on the stream, and must + not be set in subsequent requests from client to server. + Format is ``projects/{project}/subscriptions/{sub}``. 
+ ack_ids: + List of acknowledgement IDs for acknowledging previously + received messages (received on this stream or a different + stream). If an ack ID has expired, the corresponding message + may be redelivered later. Acknowledging a message more than + once will not result in an error. If the acknowledgement ID is + malformed, the stream will be aborted with status + ``INVALID_ARGUMENT``. + modify_deadline_seconds: + The list of new ack deadlines for the IDs listed in + ``modify_deadline_ack_ids``. The size of this list must be the + same as the size of ``modify_deadline_ack_ids``. If it differs + the stream will be aborted with ``INVALID_ARGUMENT``. Each + element in this list is applied to the element in the same + position in ``modify_deadline_ack_ids``. The new ack deadline + is with respect to the time this request was sent to the + Pub/Sub system. Must be >= 0. For example, if the value is 10, + the new ack deadline will expire 10 seconds after this request + is received. If the value is 0, the message is immediately + made available for another streaming or non-streaming pull + request. If the value is < 0 (an error), the stream will be + aborted with status ``INVALID_ARGUMENT``. + modify_deadline_ack_ids: + List of acknowledgement IDs whose deadline will be modified + based on the corresponding element in + ``modify_deadline_seconds``. This field can be used to + indicate that more time is needed to process a message by the + subscriber, or to make the message available for redelivery if + the processing was interrupted. + stream_ack_deadline_seconds: + The ack deadline to use for the stream. This must be provided + in the first request on the stream, but it can also be updated + on subsequent requests from client to server. The minimum + deadline you can specify is 10 seconds. The maximum deadline + you can specify is 600 seconds (10 minutes). 
+ """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.StreamingPullRequest) )) _sym_db.RegisterMessage(StreamingPullRequest) @@ -1611,6 +2278,15 @@ StreamingPullResponse = _reflection.GeneratedProtocolMessageType('StreamingPullResponse', (_message.Message,), dict( DESCRIPTOR = _STREAMINGPULLRESPONSE, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Response for the ``StreamingPull`` method. This response is used to + stream messages from the server to the client. + + + Attributes: + received_messages: + Received Pub/Sub messages. This will not be empty. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.StreamingPullResponse) )) _sym_db.RegisterMessage(StreamingPullResponse) @@ -1618,20 +2294,109 @@ CreateSnapshotRequest = _reflection.GeneratedProtocolMessageType('CreateSnapshotRequest', (_message.Message,), dict( DESCRIPTOR = _CREATESNAPSHOTREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the ``CreateSnapshot`` method. + + + Attributes: + name: + Optional user-provided name for this snapshot. If the name is + not provided in the request, the server will assign a random + name for this snapshot on the same project as the + subscription. Note that for REST API requests, you must + specify a name. Format is + ``projects/{project}/snapshots/{snap}``. + subscription: + The subscription whose backlog the snapshot retains. + Specifically, the created snapshot is guaranteed to retain: + (a) The existing backlog on the subscription. More precisely, + this is defined as the messages in the subscription's backlog + that are unacknowledged upon the successful completion of the + ``CreateSnapshot`` request; as well as: (b) Any messages + published to the subscription's topic following the successful + completion of the CreateSnapshot request. Format is + ``projects/{project}/subscriptions/{sub}``. 
+ """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.CreateSnapshotRequest) )) _sym_db.RegisterMessage(CreateSnapshotRequest) +UpdateSnapshotRequest = _reflection.GeneratedProtocolMessageType('UpdateSnapshotRequest', (_message.Message,), dict( + DESCRIPTOR = _UPDATESNAPSHOTREQUEST, + __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the UpdateSnapshot method. + + + Attributes: + snapshot: + The updated snpashot object. + update_mask: + Indicates which fields in the provided snapshot to update. + Must be specified and non-empty. + """, + # @@protoc_insertion_point(class_scope:google.pubsub.v1.UpdateSnapshotRequest) + )) +_sym_db.RegisterMessage(UpdateSnapshotRequest) + Snapshot = _reflection.GeneratedProtocolMessageType('Snapshot', (_message.Message,), dict( + + LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict( + DESCRIPTOR = _SNAPSHOT_LABELSENTRY, + __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + # @@protoc_insertion_point(class_scope:google.pubsub.v1.Snapshot.LabelsEntry) + )) + , DESCRIPTOR = _SNAPSHOT, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """A snapshot resource. + + + Attributes: + name: + The name of the snapshot. + topic: + The name of the topic from which this snapshot is retaining + messages. + expire_time: + The snapshot is guaranteed to exist up until this time. A + newly-created snapshot expires no later than 7 days from the + time of its creation. Its exact lifetime is determined at + creation by the existing backlog in the source subscription. + Specifically, the lifetime of the snapshot is ``7 days - (age + of oldest unacked message in the subscription)``. For example, + consider a subscription whose oldest unacked message is 3 days + old. If a snapshot is created from this subscription, the + snapshot -- which will always capture this 3-day-old backlog + as long as the snapshot exists -- will expire in 4 days. 
+ labels: + User labels. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.Snapshot) )) _sym_db.RegisterMessage(Snapshot) +_sym_db.RegisterMessage(Snapshot.LabelsEntry) ListSnapshotsRequest = _reflection.GeneratedProtocolMessageType('ListSnapshotsRequest', (_message.Message,), dict( DESCRIPTOR = _LISTSNAPSHOTSREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the ``ListSnapshots`` method. + + + Attributes: + project: + The name of the cloud project that snapshots belong to. Format + is ``projects/{project}``. + page_size: + Maximum number of snapshots to return. + page_token: + The value returned by the last ``ListSnapshotsResponse``; + indicates that this is a continuation of a prior + ``ListSnapshots`` call, and that the system should return the + next page of data. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.ListSnapshotsRequest) )) _sym_db.RegisterMessage(ListSnapshotsRequest) @@ -1639,6 +2404,18 @@ ListSnapshotsResponse = _reflection.GeneratedProtocolMessageType('ListSnapshotsResponse', (_message.Message,), dict( DESCRIPTOR = _LISTSNAPSHOTSRESPONSE, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Response for the ``ListSnapshots`` method. + + + Attributes: + snapshots: + The resulting snapshots. + next_page_token: + If not empty, indicates that there may be more snapshot that + match the request; this value should be passed in a new + ``ListSnapshotsRequest``. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.ListSnapshotsResponse) )) _sym_db.RegisterMessage(ListSnapshotsResponse) @@ -1646,6 +2423,15 @@ DeleteSnapshotRequest = _reflection.GeneratedProtocolMessageType('DeleteSnapshotRequest', (_message.Message,), dict( DESCRIPTOR = _DELETESNAPSHOTREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the ``DeleteSnapshot`` method. + + + Attributes: + snapshot: + The name of the snapshot to delete. 
Format is + ``projects/{project}/snapshots/{snap}``. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.DeleteSnapshotRequest) )) _sym_db.RegisterMessage(DeleteSnapshotRequest) @@ -1653,6 +2439,31 @@ SeekRequest = _reflection.GeneratedProtocolMessageType('SeekRequest', (_message.Message,), dict( DESCRIPTOR = _SEEKREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the ``Seek`` method. + + + Attributes: + subscription: + The subscription to affect. + time: + The time to seek to. Messages retained in the subscription + that were published before this time are marked as + acknowledged, and messages retained in the subscription that + were published after this time are marked as unacknowledged. + Note that this operation affects only those messages retained + in the subscription (configured by the combination of + ``message_retention_duration`` and ``retain_acked_messages``). + For example, if ``time`` corresponds to a point before the + message retention window (or to a point before the system's + notion of the subscription creation time), only retained + messages will be marked as unacknowledged, and already- + expunged messages will not be restored. + snapshot: + The snapshot to seek to. The snapshot's topic must be the same + as that of the provided subscription. Format is + ``projects/{project}/snapshots/{snap}``. 
+ """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.SeekRequest) )) _sym_db.RegisterMessage(SeekRequest) @@ -1667,10 +2478,16 @@ DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\024com.google.pubsub.v1B\013PubsubProtoP\001Z6google.golang.org/genproto/googleapis/pubsub/v1;pubsub\370\001\001\252\002\026Google.Cloud.PubSub.V1')) +_TOPIC_LABELSENTRY.has_options = True +_TOPIC_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _PUBSUBMESSAGE_ATTRIBUTESENTRY.has_options = True _PUBSUBMESSAGE_ATTRIBUTESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_SUBSCRIPTION_LABELSENTRY.has_options = True +_SUBSCRIPTION_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _PUSHCONFIG_ATTRIBUTESENTRY.has_options = True _PUSHCONFIG_ATTRIBUTESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_SNAPSHOT_LABELSENTRY.has_options = True +_SNAPSHOT_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) try: # THESE ELEMENTS WILL BE DEPRECATED. # Please use the generated *_pb2_grpc.py files instead. @@ -1752,6 +2569,11 @@ def __init__(self, channel): request_serializer=CreateSnapshotRequest.SerializeToString, response_deserializer=Snapshot.FromString, ) + self.UpdateSnapshot = channel.unary_unary( + '/google.pubsub.v1.Subscriber/UpdateSnapshot', + request_serializer=UpdateSnapshotRequest.SerializeToString, + response_deserializer=Snapshot.FromString, + ) self.DeleteSnapshot = channel.unary_unary( '/google.pubsub.v1.Subscriber/DeleteSnapshot', request_serializer=DeleteSnapshotRequest.SerializeToString, @@ -1795,6 +2617,10 @@ def GetSubscription(self, request, context): def UpdateSubscription(self, request, context): """Updates an existing subscription. 
Note that certain properties of a subscription, such as its topic, are not modifiable. + NOTE: The style guide requires body: "subscription" instead of body: "*". + Keeping the latter for internal consistency in V1, however it should be + corrected in V2. See + https://cloud.google.com/apis/design/standard_methods#update for details. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') @@ -1905,6 +2731,18 @@ def CreateSnapshot(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def UpdateSnapshot(self, request, context): + """Updates an existing snapshot. Note that certain properties of a snapshot + are not modifiable. + NOTE: The style guide requires body: "snapshot" instead of body: "*". + Keeping the latter for internal consistency in V1, however it should be + corrected in V2. See + https://cloud.google.com/apis/design/standard_methods#update for details. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def DeleteSnapshot(self, request, context): """Removes an existing snapshot. All messages retained in the snapshot are immediately dropped. 
After a snapshot is deleted, a new one may be @@ -1986,6 +2824,11 @@ def add_SubscriberServicer_to_server(servicer, server): request_deserializer=CreateSnapshotRequest.FromString, response_serializer=Snapshot.SerializeToString, ), + 'UpdateSnapshot': grpc.unary_unary_rpc_method_handler( + servicer.UpdateSnapshot, + request_deserializer=UpdateSnapshotRequest.FromString, + response_serializer=Snapshot.SerializeToString, + ), 'DeleteSnapshot': grpc.unary_unary_rpc_method_handler( servicer.DeleteSnapshot, request_deserializer=DeleteSnapshotRequest.FromString, @@ -2018,6 +2861,11 @@ def __init__(self, channel): request_serializer=Topic.SerializeToString, response_deserializer=Topic.FromString, ) + self.UpdateTopic = channel.unary_unary( + '/google.pubsub.v1.Publisher/UpdateTopic', + request_serializer=UpdateTopicRequest.SerializeToString, + response_deserializer=Topic.FromString, + ) self.Publish = channel.unary_unary( '/google.pubsub.v1.Publisher/Publish', request_serializer=PublishRequest.SerializeToString, @@ -2057,6 +2905,18 @@ def CreateTopic(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def UpdateTopic(self, request, context): + """Updates an existing topic. Note that certain properties of a topic are not + modifiable. Options settings follow the style guide: + NOTE: The style guide requires body: "topic" instead of body: "*". + Keeping the latter for internal consistency in V1, however it should be + corrected in V2. See + https://cloud.google.com/apis/design/standard_methods#update for details. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def Publish(self, request, context): """Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic does not exist. 
The message payload must not be empty; it must contain @@ -2106,6 +2966,11 @@ def add_PublisherServicer_to_server(servicer, server): request_deserializer=Topic.FromString, response_serializer=Topic.SerializeToString, ), + 'UpdateTopic': grpc.unary_unary_rpc_method_handler( + servicer.UpdateTopic, + request_deserializer=UpdateTopicRequest.FromString, + response_serializer=Topic.SerializeToString, + ), 'Publish': grpc.unary_unary_rpc_method_handler( servicer.Publish, request_deserializer=PublishRequest.FromString, @@ -2166,6 +3031,10 @@ def GetSubscription(self, request, context): def UpdateSubscription(self, request, context): """Updates an existing subscription. Note that certain properties of a subscription, such as its topic, are not modifiable. + NOTE: The style guide requires body: "subscription" instead of body: "*". + Keeping the latter for internal consistency in V1, however it should be + corrected in V2. See + https://cloud.google.com/apis/design/standard_methods#update for details. """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def ListSubscriptions(self, request, context): @@ -2246,6 +3115,15 @@ def CreateSnapshot(self, request, context): Note that for REST API requests, you must specify a name in the request. """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def UpdateSnapshot(self, request, context): + """Updates an existing snapshot. Note that certain properties of a snapshot + are not modifiable. + NOTE: The style guide requires body: "snapshot" instead of body: "*". + Keeping the latter for internal consistency in V1, however it should be + corrected in V2. See + https://cloud.google.com/apis/design/standard_methods#update for details. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def DeleteSnapshot(self, request, context): """Removes an existing snapshot. All messages retained in the snapshot are immediately dropped. 
After a snapshot is deleted, a new one may be @@ -2291,6 +3169,10 @@ def GetSubscription(self, request, timeout, metadata=None, with_call=False, prot def UpdateSubscription(self, request, timeout, metadata=None, with_call=False, protocol_options=None): """Updates an existing subscription. Note that certain properties of a subscription, such as its topic, are not modifiable. + NOTE: The style guide requires body: "subscription" instead of body: "*". + Keeping the latter for internal consistency in V1, however it should be + corrected in V2. See + https://cloud.google.com/apis/design/standard_methods#update for details. """ raise NotImplementedError() UpdateSubscription.future = None @@ -2380,6 +3262,16 @@ def CreateSnapshot(self, request, timeout, metadata=None, with_call=False, proto """ raise NotImplementedError() CreateSnapshot.future = None + def UpdateSnapshot(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Updates an existing snapshot. Note that certain properties of a snapshot + are not modifiable. + NOTE: The style guide requires body: "snapshot" instead of body: "*". + Keeping the latter for internal consistency in V1, however it should be + corrected in V2. See + https://cloud.google.com/apis/design/standard_methods#update for details. + """ + raise NotImplementedError() + UpdateSnapshot.future = None def DeleteSnapshot(self, request, timeout, metadata=None, with_call=False, protocol_options=None): """Removes an existing snapshot. All messages retained in the snapshot are immediately dropped. 
After a snapshot is deleted, a new one may be @@ -2416,6 +3308,7 @@ def beta_create_Subscriber_server(servicer, pool=None, pool_size=None, default_t ('google.pubsub.v1.Subscriber', 'Pull'): PullRequest.FromString, ('google.pubsub.v1.Subscriber', 'Seek'): SeekRequest.FromString, ('google.pubsub.v1.Subscriber', 'StreamingPull'): StreamingPullRequest.FromString, + ('google.pubsub.v1.Subscriber', 'UpdateSnapshot'): UpdateSnapshotRequest.FromString, ('google.pubsub.v1.Subscriber', 'UpdateSubscription'): UpdateSubscriptionRequest.FromString, } response_serializers = { @@ -2432,6 +3325,7 @@ def beta_create_Subscriber_server(servicer, pool=None, pool_size=None, default_t ('google.pubsub.v1.Subscriber', 'Pull'): PullResponse.SerializeToString, ('google.pubsub.v1.Subscriber', 'Seek'): SeekResponse.SerializeToString, ('google.pubsub.v1.Subscriber', 'StreamingPull'): StreamingPullResponse.SerializeToString, + ('google.pubsub.v1.Subscriber', 'UpdateSnapshot'): Snapshot.SerializeToString, ('google.pubsub.v1.Subscriber', 'UpdateSubscription'): Subscription.SerializeToString, } method_implementations = { @@ -2448,6 +3342,7 @@ def beta_create_Subscriber_server(servicer, pool=None, pool_size=None, default_t ('google.pubsub.v1.Subscriber', 'Pull'): face_utilities.unary_unary_inline(servicer.Pull), ('google.pubsub.v1.Subscriber', 'Seek'): face_utilities.unary_unary_inline(servicer.Seek), ('google.pubsub.v1.Subscriber', 'StreamingPull'): face_utilities.stream_stream_inline(servicer.StreamingPull), + ('google.pubsub.v1.Subscriber', 'UpdateSnapshot'): face_utilities.unary_unary_inline(servicer.UpdateSnapshot), ('google.pubsub.v1.Subscriber', 'UpdateSubscription'): face_utilities.unary_unary_inline(servicer.UpdateSubscription), } server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) @@ -2474,6 
+3369,7 @@ def beta_create_Subscriber_stub(channel, host=None, metadata_transformer=None, p ('google.pubsub.v1.Subscriber', 'Pull'): PullRequest.SerializeToString, ('google.pubsub.v1.Subscriber', 'Seek'): SeekRequest.SerializeToString, ('google.pubsub.v1.Subscriber', 'StreamingPull'): StreamingPullRequest.SerializeToString, + ('google.pubsub.v1.Subscriber', 'UpdateSnapshot'): UpdateSnapshotRequest.SerializeToString, ('google.pubsub.v1.Subscriber', 'UpdateSubscription'): UpdateSubscriptionRequest.SerializeToString, } response_deserializers = { @@ -2490,6 +3386,7 @@ def beta_create_Subscriber_stub(channel, host=None, metadata_transformer=None, p ('google.pubsub.v1.Subscriber', 'Pull'): PullResponse.FromString, ('google.pubsub.v1.Subscriber', 'Seek'): SeekResponse.FromString, ('google.pubsub.v1.Subscriber', 'StreamingPull'): StreamingPullResponse.FromString, + ('google.pubsub.v1.Subscriber', 'UpdateSnapshot'): Snapshot.FromString, ('google.pubsub.v1.Subscriber', 'UpdateSubscription'): Subscription.FromString, } cardinalities = { @@ -2506,6 +3403,7 @@ def beta_create_Subscriber_stub(channel, host=None, metadata_transformer=None, p 'Pull': cardinality.Cardinality.UNARY_UNARY, 'Seek': cardinality.Cardinality.UNARY_UNARY, 'StreamingPull': cardinality.Cardinality.STREAM_STREAM, + 'UpdateSnapshot': cardinality.Cardinality.UNARY_UNARY, 'UpdateSubscription': cardinality.Cardinality.UNARY_UNARY, } stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) @@ -2525,6 +3423,15 @@ def CreateTopic(self, request, context): """Creates the given topic with the given name. """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def UpdateTopic(self, request, context): + """Updates an existing topic. Note that certain properties of a topic are not + modifiable. 
Options settings follow the style guide: + NOTE: The style guide requires body: "topic" instead of body: "*". + Keeping the latter for internal consistency in V1, however it should be + corrected in V2. See + https://cloud.google.com/apis/design/standard_methods#update for details. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def Publish(self, request, context): """Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic does not exist. The message payload must not be empty; it must contain @@ -2567,6 +3474,16 @@ def CreateTopic(self, request, timeout, metadata=None, with_call=False, protocol """ raise NotImplementedError() CreateTopic.future = None + def UpdateTopic(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Updates an existing topic. Note that certain properties of a topic are not + modifiable. Options settings follow the style guide: + NOTE: The style guide requires body: "topic" instead of body: "*". + Keeping the latter for internal consistency in V1, however it should be + corrected in V2. See + https://cloud.google.com/apis/design/standard_methods#update for details. + """ + raise NotImplementedError() + UpdateTopic.future = None def Publish(self, request, timeout, metadata=None, with_call=False, protocol_options=None): """Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic does not exist. 
The message payload must not be empty; it must contain @@ -2613,6 +3530,7 @@ def beta_create_Publisher_server(servicer, pool=None, pool_size=None, default_ti ('google.pubsub.v1.Publisher', 'ListTopicSubscriptions'): ListTopicSubscriptionsRequest.FromString, ('google.pubsub.v1.Publisher', 'ListTopics'): ListTopicsRequest.FromString, ('google.pubsub.v1.Publisher', 'Publish'): PublishRequest.FromString, + ('google.pubsub.v1.Publisher', 'UpdateTopic'): UpdateTopicRequest.FromString, } response_serializers = { ('google.pubsub.v1.Publisher', 'CreateTopic'): Topic.SerializeToString, @@ -2621,6 +3539,7 @@ def beta_create_Publisher_server(servicer, pool=None, pool_size=None, default_ti ('google.pubsub.v1.Publisher', 'ListTopicSubscriptions'): ListTopicSubscriptionsResponse.SerializeToString, ('google.pubsub.v1.Publisher', 'ListTopics'): ListTopicsResponse.SerializeToString, ('google.pubsub.v1.Publisher', 'Publish'): PublishResponse.SerializeToString, + ('google.pubsub.v1.Publisher', 'UpdateTopic'): Topic.SerializeToString, } method_implementations = { ('google.pubsub.v1.Publisher', 'CreateTopic'): face_utilities.unary_unary_inline(servicer.CreateTopic), @@ -2629,6 +3548,7 @@ def beta_create_Publisher_server(servicer, pool=None, pool_size=None, default_ti ('google.pubsub.v1.Publisher', 'ListTopicSubscriptions'): face_utilities.unary_unary_inline(servicer.ListTopicSubscriptions), ('google.pubsub.v1.Publisher', 'ListTopics'): face_utilities.unary_unary_inline(servicer.ListTopics), ('google.pubsub.v1.Publisher', 'Publish'): face_utilities.unary_unary_inline(servicer.Publish), + ('google.pubsub.v1.Publisher', 'UpdateTopic'): face_utilities.unary_unary_inline(servicer.UpdateTopic), } server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) return 
beta_implementations.server(method_implementations, options=server_options) @@ -2647,6 +3567,7 @@ def beta_create_Publisher_stub(channel, host=None, metadata_transformer=None, po ('google.pubsub.v1.Publisher', 'ListTopicSubscriptions'): ListTopicSubscriptionsRequest.SerializeToString, ('google.pubsub.v1.Publisher', 'ListTopics'): ListTopicsRequest.SerializeToString, ('google.pubsub.v1.Publisher', 'Publish'): PublishRequest.SerializeToString, + ('google.pubsub.v1.Publisher', 'UpdateTopic'): UpdateTopicRequest.SerializeToString, } response_deserializers = { ('google.pubsub.v1.Publisher', 'CreateTopic'): Topic.FromString, @@ -2655,6 +3576,7 @@ def beta_create_Publisher_stub(channel, host=None, metadata_transformer=None, po ('google.pubsub.v1.Publisher', 'ListTopicSubscriptions'): ListTopicSubscriptionsResponse.FromString, ('google.pubsub.v1.Publisher', 'ListTopics'): ListTopicsResponse.FromString, ('google.pubsub.v1.Publisher', 'Publish'): PublishResponse.FromString, + ('google.pubsub.v1.Publisher', 'UpdateTopic'): Topic.FromString, } cardinalities = { 'CreateTopic': cardinality.Cardinality.UNARY_UNARY, @@ -2663,6 +3585,7 @@ def beta_create_Publisher_stub(channel, host=None, metadata_transformer=None, po 'ListTopicSubscriptions': cardinality.Cardinality.UNARY_UNARY, 'ListTopics': cardinality.Cardinality.UNARY_UNARY, 'Publish': cardinality.Cardinality.UNARY_UNARY, + 'UpdateTopic': cardinality.Cardinality.UNARY_UNARY, } stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) return beta_implementations.dynamic_stub(channel, 'google.pubsub.v1.Publisher', cardinalities, options=stub_options) diff --git a/pubsub/google/cloud/proto/pubsub/v1/pubsub_pb2_grpc.py b/pubsub/google/cloud/proto/pubsub/v1/pubsub_pb2_grpc.py index 5a970cbc77ab..06dd470470d8 100644 --- 
a/pubsub/google/cloud/proto/pubsub/v1/pubsub_pb2_grpc.py +++ b/pubsub/google/cloud/proto/pubsub/v1/pubsub_pb2_grpc.py @@ -76,6 +76,11 @@ def __init__(self, channel): request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.CreateSnapshotRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Snapshot.FromString, ) + self.UpdateSnapshot = channel.unary_unary( + '/google.pubsub.v1.Subscriber/UpdateSnapshot', + request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.UpdateSnapshotRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Snapshot.FromString, + ) self.DeleteSnapshot = channel.unary_unary( '/google.pubsub.v1.Subscriber/DeleteSnapshot', request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.DeleteSnapshotRequest.SerializeToString, @@ -119,6 +124,10 @@ def GetSubscription(self, request, context): def UpdateSubscription(self, request, context): """Updates an existing subscription. Note that certain properties of a subscription, such as its topic, are not modifiable. + NOTE: The style guide requires body: "subscription" instead of body: "*". + Keeping the latter for internal consistency in V1, however it should be + corrected in V2. See + https://cloud.google.com/apis/design/standard_methods#update for details. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') @@ -229,6 +238,18 @@ def CreateSnapshot(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def UpdateSnapshot(self, request, context): + """Updates an existing snapshot. Note that certain properties of a snapshot + are not modifiable. + NOTE: The style guide requires body: "snapshot" instead of body: "*". + Keeping the latter for internal consistency in V1, however it should be + corrected in V2. 
See + https://cloud.google.com/apis/design/standard_methods#update for details. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def DeleteSnapshot(self, request, context): """Removes an existing snapshot. All messages retained in the snapshot are immediately dropped. After a snapshot is deleted, a new one may be @@ -310,6 +331,11 @@ def add_SubscriberServicer_to_server(servicer, server): request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.CreateSnapshotRequest.FromString, response_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Snapshot.SerializeToString, ), + 'UpdateSnapshot': grpc.unary_unary_rpc_method_handler( + servicer.UpdateSnapshot, + request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.UpdateSnapshotRequest.FromString, + response_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Snapshot.SerializeToString, + ), 'DeleteSnapshot': grpc.unary_unary_rpc_method_handler( servicer.DeleteSnapshot, request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.DeleteSnapshotRequest.FromString, @@ -342,6 +368,11 @@ def __init__(self, channel): request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Topic.SerializeToString, response_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Topic.FromString, ) + self.UpdateTopic = channel.unary_unary( + '/google.pubsub.v1.Publisher/UpdateTopic', + request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.UpdateTopicRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Topic.FromString, + ) self.Publish = channel.unary_unary( '/google.pubsub.v1.Publisher/Publish', 
request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.PublishRequest.SerializeToString, @@ -381,6 +412,18 @@ def CreateTopic(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def UpdateTopic(self, request, context): + """Updates an existing topic. Note that certain properties of a topic are not + modifiable. Options settings follow the style guide: + NOTE: The style guide requires body: "topic" instead of body: "*". + Keeping the latter for internal consistency in V1, however it should be + corrected in V2. See + https://cloud.google.com/apis/design/standard_methods#update for details. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def Publish(self, request, context): """Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic does not exist. The message payload must not be empty; it must contain @@ -430,6 +473,11 @@ def add_PublisherServicer_to_server(servicer, server): request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Topic.FromString, response_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Topic.SerializeToString, ), + 'UpdateTopic': grpc.unary_unary_rpc_method_handler( + servicer.UpdateTopic, + request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.UpdateTopicRequest.FromString, + response_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Topic.SerializeToString, + ), 'Publish': grpc.unary_unary_rpc_method_handler( servicer.Publish, request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.PublishRequest.FromString, diff --git a/pubsub/google/cloud/pubsub_v1/publisher/client.py b/pubsub/google/cloud/pubsub_v1/publisher/client.py index 1a9903a31748..e80662a715ef 100644 --- 
a/pubsub/google/cloud/pubsub_v1/publisher/client.py +++ b/pubsub/google/cloud/pubsub_v1/publisher/client.py @@ -39,8 +39,8 @@ class Client(object): get sensible defaults. Args: - batch_settings (~.pubsub_v1.types.BatchSettings): The settings - for batch publishing. + batch_settings (~google.cloud.pubsub_v1.types.BatchSettings): The + settings for batch publishing. batch_class (class): A class that describes how to handle batches. You may subclass the :class:`.pubsub_v1.publisher.batch.base.BaseBatch` class in @@ -73,8 +73,8 @@ def batch(self, topic, message, create=True, autocommit=True): Args: topic (str): A string representing the topic. - message (~.pubsub_v1.types.PubsubMessage): The message that will - be committed. + message (~google.cloud.pubsub_v1.types.PubsubMessage): The message + that will be committed. create (bool): Whether to create a new batch if no batch is found. Defaults to True. autocommit (bool): Whether to autocommit this batch. @@ -128,16 +128,15 @@ def publish(self, topic, data, **attrs): >>> response = client.publish(topic, data, username='guido') Args: - topic (~.pubsub_v1.types.Topic): The topic to publish - messages to. + topic (str): The topic to publish messages to. data (bytes): A bytestring representing the message body. This must be a bytestring. attrs (Mapping[str, str]): A dictionary of attributes to be sent as metadata. (These may be text strings or byte strings.) Returns: - ~.pubsub_v1.publisher.futures.Future: An object conforming - to the ``concurrent.futures.Future`` interface. + ~concurrent.futures.Future: An object conforming to the + ``concurrent.futures.Future`` interface. """ # Sanity check: Is the data being sent as a bytestring? # If it is literally anything else, complain loudly about it. 
diff --git a/pubsub/google/cloud/pubsub_v1/publisher/exceptions.py b/pubsub/google/cloud/pubsub_v1/publisher/exceptions.py index 5b5d63d51494..bae090ceb9d7 100644 --- a/pubsub/google/cloud/pubsub_v1/publisher/exceptions.py +++ b/pubsub/google/cloud/pubsub_v1/publisher/exceptions.py @@ -14,14 +14,9 @@ from __future__ import absolute_import -from google.api.core.exceptions import GoogleAPICallError - +from concurrent.futures import TimeoutError -try: - from concurrent.futures import TimeoutError -except ImportError: - class TimeoutError(Exception): - pass +from google.api.core.exceptions import GoogleAPICallError class PublishError(GoogleAPICallError): diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/message.py b/pubsub/google/cloud/pubsub_v1/subscriber/message.py index 1e97c324b2e9..1015149cfbbf 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/message.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/message.py @@ -28,16 +28,11 @@ class Message(object): implementing a custom subclass to :class:`~.pubsub_v1.subscriber.consumer.BaseConsumer`.) - .. note:: - Messages in Google Cloud Pub/Sub are opaque blobs of bytes. This - means that the ``data`` attribute will consistently be a - :class:`bytes` object. If you want a text string, you should - use :meth:`bytes.decode`. - - Properties: + Attributes: message_id (str): The message ID. In general, you should not need to use this directly. - data (bytes): The data in the message. + data (bytes): The data in the message. Note that this will be a + :class:`bytes`, not a text string. attributes (dict): The attributes sent along with the message. publish_time (datetime): The time that this message was originally published. 
diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py index e83a93e287a9..df0f965748de 100644 --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py @@ -31,7 +31,7 @@ class Policy(base.BasePolicy): - """A consumer class based on :class:``threading.Thread``. + """A consumer class based on :class:`threading.Thread`. This consumer handles the connection to the Pub/Sub service and all of the concurrency needs. @@ -46,8 +46,8 @@ def __init__(self, client, subscription, flow_control=types.FlowControl(), subscription (str): The name of the subscription. The canonical format for this is ``projects/{project}/subscriptions/{subscription}``. - flow_control (~.pubsub_v1.types.FlowControl): The flow control - settings. + flow_control (~google.cloud.pubsub_v1.types.FlowControl): The flow + control settings. executor (~concurrent.futures.ThreadPoolExecutor): (Optional.) A ThreadPoolExecutor instance, or anything duck-type compatible with it. diff --git a/pubsub/google/cloud/pubsub_v1/types.py b/pubsub/google/cloud/pubsub_v1/types.py index 522fb63fee7a..a9de4a88f7f8 100644 --- a/pubsub/google/cloud/pubsub_v1/types.py +++ b/pubsub/google/cloud/pubsub_v1/types.py @@ -29,7 +29,8 @@ # This class is used when creating a publisher or subscriber client, and # these settings can be altered to tweak Pub/Sub behavior. # The defaults should be fine for most use cases. -BatchSettings = collections.namedtuple('BatchSettings', +BatchSettings = collections.namedtuple( + 'BatchSettings', ['max_bytes', 'max_latency', 'max_messages'], ) BatchSettings.__new__.__defaults__ = ( @@ -43,7 +44,8 @@ # This class is used when creating a publisher or subscriber client, and # these settings can be altered to tweak Pub/Sub behavior. # The defaults should be fine for most use cases. 
-FlowControl = collections.namedtuple('FlowControl', +FlowControl = collections.namedtuple( + 'FlowControl', ['max_bytes', 'max_messages', 'resume_threshold'], ) FlowControl.__new__.__defaults__ = ( @@ -60,6 +62,7 @@ _names = ['BatchSettings', 'FlowControl', 'Timestamp'] for name, message in get_messages(pubsub_pb2).items(): + message.__module__ = 'google.cloud.pubsub_v1.types' setattr(sys.modules[__name__], name, message) _names.append(name)