diff --git a/docs/bigtable-client-intro.rst b/docs/bigtable-client-intro.rst index 55111ad1dfb5..db04ffa0e0c1 100644 --- a/docs/bigtable-client-intro.rst +++ b/docs/bigtable-client-intro.rst @@ -63,7 +63,7 @@ Configuration Admin API Access ---------------- -If you'll be using your client to make `Cluster Admin`_ and `Table Admin`_ +If you'll be using your client to make `Instance Admin`_ and `Table Admin`_ API requests, you'll need to pass the ``admin`` argument: .. code:: python @@ -89,10 +89,10 @@ Next Step --------- After a :class:`Client `, the next highest-level -object is a :class:`Cluster `. You'll need +object is a :class:`Instance `. You'll need one before you can interact with tables or data. -Head next to learn about the :doc:`bigtable-cluster-api`. +Head next to learn about the :doc:`bigtable-instance-api`. -.. _Cluster Admin: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/tree/master/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1 +.. _Instance Admin: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/tree/master/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1 .. _Table Admin: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/tree/master/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1 diff --git a/docs/bigtable-cluster-api.rst b/docs/bigtable-cluster-api.rst deleted file mode 100644 index 1266fa8e893a..000000000000 --- a/docs/bigtable-cluster-api.rst +++ /dev/null @@ -1,187 +0,0 @@ -Cluster Admin API -================= - -.. warning:: - - gRPC is required for using the Cloud Bigtable API. As of May 2016, - ``grpcio`` is only supported in Python 2.7, so importing - :mod:`gcloud.bigtable` in other versions of Python will fail. - -After creating a :class:`Client `, you can -interact with individual clusters, groups of clusters or available -zones for a project. 
- -List Clusters -------------- - -If you want a comprehensive list of all existing clusters, make a -`ListClusters`_ API request with -:meth:`Client.list_clusters() `: - -.. code:: python - - clusters = client.list_clusters() - -List Zones ----------- - -If you aren't sure which ``zone`` to create a cluster in, find out -which zones your project has access to with a `ListZones`_ API request -with :meth:`Client.list_zones() `: - -.. code:: python - - zones = client.list_zones() - -You can choose a :class:`string ` from among the result to pass to -the :class:`Cluster ` constructor. - -The available zones (as of February 2016) are - -.. code:: python - - >>> zones - [u'asia-east1-b', u'europe-west1-c', u'us-central1-c', u'us-central1-b'] - -Cluster Factory ---------------- - -To create a :class:`Cluster ` object: - -.. code:: python - - cluster = client.cluster(zone, cluster_id, - display_name=display_name, - serve_nodes=3) - -Both ``display_name`` and ``serve_nodes`` are optional. When not provided, -``display_name`` defaults to the ``cluster_id`` value and ``serve_nodes`` -defaults to the minimum allowed: -:data:`DEFAULT_SERVE_NODES `. - -Even if this :class:`Cluster ` already -has been created with the API, you'll want this object to use as a -parent of a :class:`Table ` just as the -:class:`Client ` is used as the parent of -a :class:`Cluster `. - -Create a new Cluster --------------------- - -After creating the cluster object, make a `CreateCluster`_ API request -with :meth:`create() `: - -.. code:: python - - cluster.display_name = 'My very own cluster' - cluster.create() - -If you would like more than the minimum number of nodes -(:data:`DEFAULT_SERVE_NODES `) -in your cluster: - -.. code:: python - - cluster.serve_nodes = 10 - cluster.create() - -Check on Current Operation --------------------------- - -.. 
note:: - - When modifying a cluster (via a `CreateCluster`_, `UpdateCluster`_ or - `UndeleteCluster`_ request), the Bigtable API will return a - `long-running operation`_ and a corresponding - :class:`Operation ` object - will be returned by each of - :meth:`create() `, - :meth:`update() ` and - :meth:`undelete() `. - -You can check if a long-running operation (for a -:meth:`create() `, -:meth:`update() ` or -:meth:`undelete() `) has finished -by making a `GetOperation`_ request with -:meth:`Operation.finished() `: - -.. code:: python - - >>> operation = cluster.create() - >>> operation.finished() - True - -.. note:: - - Once an :class:`Operation ` object - has returned :data:`True` from - :meth:`finished() `, the - object should not be re-used. Subsequent calls to - :meth:`finished() ` - will result in a :class:`ValueError `. - -Get metadata for an existing Cluster ------------------------------------- - -After creating the cluster object, make a `GetCluster`_ API request -with :meth:`reload() `: - -.. code:: python - - cluster.reload() - -This will load ``serve_nodes`` and ``display_name`` for the existing -``cluster`` in addition to the ``cluster_id``, ``zone`` and ``project`` -already set on the :class:`Cluster ` object. - -Update an existing Cluster --------------------------- - -After creating the cluster object, make an `UpdateCluster`_ API request -with :meth:`update() `: - -.. code:: python - - client.display_name = 'New display_name' - cluster.update() - -Delete an existing Cluster --------------------------- - -Make a `DeleteCluster`_ API request with -:meth:`delete() `: - -.. code:: python - - cluster.delete() - -Undelete a deleted Cluster --------------------------- - -Make an `UndeleteCluster`_ API request with -:meth:`undelete() `: - -.. code:: python - - cluster.undelete() - -Next Step ---------- - -Now we go down the hierarchy from -:class:`Cluster ` to a -:class:`Table `. - -Head next to learn about the :doc:`bigtable-table-api`. - -.. 
_Cluster Admin API: https://cloud.google.com/bigtable/docs/creating-cluster -.. _CreateCluster: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L66-L68 -.. _GetCluster: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L38-L40 -.. _UpdateCluster: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L93-L95 -.. _DeleteCluster: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L109-L111 -.. _ListZones: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L33-L35 -.. _ListClusters: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L44-L46 -.. _GetOperation: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/longrunning/operations.proto#L43-L45 -.. _UndeleteCluster: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L126-L128 -.. 
_long-running operation: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/longrunning/operations.proto#L73-L102 diff --git a/docs/bigtable-instance-api.rst b/docs/bigtable-instance-api.rst new file mode 100644 index 000000000000..c2fd1402a97b --- /dev/null +++ b/docs/bigtable-instance-api.rst @@ -0,0 +1,133 @@ +Instance Admin API +================== + +.. warning:: + + gRPC is required for using the Cloud Bigtable API. As of May 2016, + ``grpcio`` is only supported in Python 2.7, so importing + :mod:`gcloud.bigtable` in other versions of Python will fail. + +After creating a :class:`Client `, you can +interact with individual instances for a project. + +List Instances +-------------- + +If you want a comprehensive list of all existing instances, make a +`ListInstances`_ API request with +:meth:`Client.list_instances() `: + +.. code:: python + + instances = client.list_instances() + +Instance Factory +---------------- + +To create an :class:`Instance ` object: + +.. code:: python + + instance = client.instance(instance_id, display_name=display_name) + +``display_name`` is optional. When not provided, +``display_name`` defaults to the ``instance_id`` value. + +Even if this :class:`Instance ` already +has been created with the API, you'll want this object to use as a +parent of a :class:`Table ` just as the +:class:`Client ` is used as the parent of +an :class:`Instance `. + +Create a new Instance +--------------------- + +After creating the instance object, make a `CreateInstance`_ API request +with :meth:`create() `: + +.. code:: python + + instance.display_name = 'My very own instance' + instance.create() + +Check on Current Operation +-------------------------- + +.. 
note:: + + When modifying an instance (via a `CreateInstance`_ request), the Bigtable + API will return a `long-running operation`_ and a corresponding + :class:`Operation ` object + will be returned by + :meth:`create() `. + +You can check if a long-running operation (for a +:meth:`create() `) has finished +by making a `GetOperation`_ request with +:meth:`Operation.finished() `: + +.. code:: python + + >>> operation = instance.create() + >>> operation.finished() + True + +.. note:: + + Once an :class:`Operation ` object + has returned :data:`True` from + :meth:`finished() `, the + object should not be re-used. Subsequent calls to + :meth:`finished() ` + will result in a :class:`ValueError `. + +Get metadata for an existing Instance +------------------------------------- + +After creating the instance object, make a `GetInstance`_ API request +with :meth:`reload() `: + +.. code:: python + + instance.reload() + +This will load ``display_name`` for the existing ``instance`` object. + +Update an existing Instance +--------------------------- + +After creating the instance object, make an `UpdateInstance`_ API request +with :meth:`update() `: + +.. code:: python + + instance.display_name = 'New display_name' + instance.update() + +Delete an existing Instance +--------------------------- + +Make a `DeleteInstance`_ API request with +:meth:`delete() `: + +.. code:: python + + instance.delete() + +Next Step +--------- + +Now we go down the hierarchy from +:class:`Instance ` to a +:class:`Table `. + +Head next to learn about the :doc:`bigtable-table-api`. + +.. _Instance Admin API: https://cloud.google.com/bigtable/docs/creating-instance +.. _CreateInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L66-L68 +.. 
_GetInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L38-L40 +.. _UpdateInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L93-L95 +.. _DeleteInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L109-L111 +.. _ListInstances: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L44-L46 +.. _GetOperation: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/longrunning/operations.proto#L43-L45 +.. _long-running operation: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/longrunning/operations.proto#L73-L102 diff --git a/docs/bigtable-instance.rst b/docs/bigtable-instance.rst new file mode 100644 index 000000000000..7ba1c15d8df3 --- /dev/null +++ b/docs/bigtable-instance.rst @@ -0,0 +1,12 @@ +Instance +~~~~~~~~ + +.. warning:: + + gRPC is required for using the Cloud Bigtable API. As of May 2016, + ``grpcio`` is only supported in Python 2.7, so importing + :mod:`gcloud.bigtable` in other versions of Python will fail. + +.. 
automodule:: gcloud.bigtable.instance + :members: + :show-inheritance: diff --git a/docs/bigtable-table-api.rst b/docs/bigtable-table-api.rst index 78ac3c6f079a..554b157031f9 100644 --- a/docs/bigtable-table-api.rst +++ b/docs/bigtable-table-api.rst @@ -7,20 +7,20 @@ Table Admin API ``grpcio`` is only supported in Python 2.7, so importing :mod:`gcloud.bigtable` in other versions of Python will fail. -After creating a :class:`Cluster `, you can +After creating a :class:`Instance `, you can interact with individual tables, groups of tables or column families within a table. List Tables ----------- -If you want a comprehensive list of all existing tables in a cluster, make a +If you want a comprehensive list of all existing tables in a instance, make a `ListTables`_ API request with -:meth:`Cluster.list_tables() `: +:meth:`Instance.list_tables() `: .. code:: python - >>> cluster.list_tables() + >>> instance.list_tables() [, ] @@ -31,7 +31,7 @@ To create a :class:`Table ` object: .. code:: python - table = cluster.table(table_id) + table = instance.table(table_id) Even if this :class:`Table ` already has been created with the API, you'll want this object to use as a diff --git a/docs/index.rst b/docs/index.rst index df0aa0ea9980..b263dba70531 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -63,10 +63,11 @@ bigtable-usage HappyBase bigtable-client-intro - bigtable-cluster-api + bigtable-instance-api bigtable-table-api bigtable-data-api Client + bigtable-instance bigtable-cluster bigtable-table bigtable-column-family diff --git a/gcloud/bigtable/_generated_v2/bigtable_instance_admin_pb2.py b/gcloud/bigtable/_generated_v2/bigtable_instance_admin_pb2.py index 344918dc1c44..4d02b6e71bb8 100644 --- a/gcloud/bigtable/_generated_v2/bigtable_instance_admin_pb2.py +++ b/gcloud/bigtable/_generated_v2/bigtable_instance_admin_pb2.py @@ -613,4 +613,186 @@ DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), 
_b('\n\034com.google.bigtable.admin.v2B\032BigtableInstanceAdminProtoP\001')) _CREATEINSTANCEREQUEST_CLUSTERSENTRY.has_options = True _CREATEINSTANCEREQUEST_CLUSTERSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +import abc +import six +from grpc.beta import implementations as beta_implementations +from grpc.beta import interfaces as beta_interfaces +from grpc.framework.common import cardinality +from grpc.framework.interfaces.face import utilities as face_utilities + +class BetaBigtableInstanceAdminServicer(object): + """""" + def CreateInstance(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def GetInstance(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ListInstances(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def UpdateInstance(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def DeleteInstance(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def CreateCluster(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def GetCluster(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ListClusters(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def UpdateCluster(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def DeleteCluster(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + +class BetaBigtableInstanceAdminStub(object): + """The interface to which stubs will conform.""" + def CreateInstance(self, request, timeout): + raise NotImplementedError() + CreateInstance.future = None + def GetInstance(self, request, timeout): + raise NotImplementedError() + GetInstance.future = None + def ListInstances(self, request, timeout): + raise NotImplementedError() + ListInstances.future = 
None + def UpdateInstance(self, request, timeout): + raise NotImplementedError() + UpdateInstance.future = None + def DeleteInstance(self, request, timeout): + raise NotImplementedError() + DeleteInstance.future = None + def CreateCluster(self, request, timeout): + raise NotImplementedError() + CreateCluster.future = None + def GetCluster(self, request, timeout): + raise NotImplementedError() + GetCluster.future = None + def ListClusters(self, request, timeout): + raise NotImplementedError() + ListClusters.future = None + def UpdateCluster(self, request, timeout): + raise NotImplementedError() + UpdateCluster.future = None + def DeleteCluster(self, request, timeout): + raise NotImplementedError() + DeleteCluster.future = None + +def beta_create_BigtableInstanceAdmin_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.longrunning.operations_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.bigtable.admin.v2.instance_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.bigtable.admin.v2.instance_pb2 + import google.bigtable.admin.v2.instance_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.protobuf.empty_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.longrunning.operations_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.bigtable.admin.v2.instance_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.bigtable.admin.v2.instance_pb2 + import google.longrunning.operations_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.protobuf.empty_pb2 + request_deserializers = { + 
('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.CreateClusterRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.CreateInstanceRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.DeleteClusterRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.DeleteInstanceRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.GetClusterRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.GetInstanceRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.ListClustersRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.ListInstancesRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google.bigtable.admin.v2.instance_pb2.Cluster.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google.bigtable.admin.v2.instance_pb2.Instance.FromString, + } + response_serializers = { + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): google.longrunning.operations_pb2.Operation.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): google.longrunning.operations_pb2.Operation.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): google.protobuf.empty_pb2.Empty.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): 
google.protobuf.empty_pb2.Empty.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): google.bigtable.admin.v2.instance_pb2.Cluster.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): google.bigtable.admin.v2.instance_pb2.Instance.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.ListClustersResponse.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.ListInstancesResponse.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google.longrunning.operations_pb2.Operation.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google.bigtable.admin.v2.instance_pb2.Instance.SerializeToString, + } + method_implementations = { + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): face_utilities.unary_unary_inline(servicer.CreateCluster), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): face_utilities.unary_unary_inline(servicer.CreateInstance), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): face_utilities.unary_unary_inline(servicer.DeleteCluster), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): face_utilities.unary_unary_inline(servicer.DeleteInstance), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): face_utilities.unary_unary_inline(servicer.GetCluster), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): face_utilities.unary_unary_inline(servicer.GetInstance), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): face_utilities.unary_unary_inline(servicer.ListClusters), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): face_utilities.unary_unary_inline(servicer.ListInstances), + 
('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): face_utilities.unary_unary_inline(servicer.UpdateCluster), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): face_utilities.unary_unary_inline(servicer.UpdateInstance), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + +def beta_create_BigtableInstanceAdmin_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.longrunning.operations_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.bigtable.admin.v2.instance_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.bigtable.admin.v2.instance_pb2 + import google.bigtable.admin.v2.instance_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.protobuf.empty_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.longrunning.operations_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.bigtable.admin.v2.instance_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.bigtable.admin.v2.instance_pb2 + import google.longrunning.operations_pb2 + import google.bigtable.admin.v2.bigtable_instance_admin_pb2 + import google.protobuf.empty_pb2 + request_serializers = { + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.CreateClusterRequest.SerializeToString, + 
('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.CreateInstanceRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.DeleteClusterRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.DeleteInstanceRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.GetClusterRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.GetInstanceRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.ListClustersRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.ListInstancesRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google.bigtable.admin.v2.instance_pb2.Cluster.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google.bigtable.admin.v2.instance_pb2.Instance.SerializeToString, + } + response_deserializers = { + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): google.longrunning.operations_pb2.Operation.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): google.longrunning.operations_pb2.Operation.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): google.protobuf.empty_pb2.Empty.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): google.protobuf.empty_pb2.Empty.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): 
google.bigtable.admin.v2.instance_pb2.Cluster.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): google.bigtable.admin.v2.instance_pb2.Instance.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.ListClustersResponse.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): google.bigtable.admin.v2.bigtable_instance_admin_pb2.ListInstancesResponse.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google.longrunning.operations_pb2.Operation.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google.bigtable.admin.v2.instance_pb2.Instance.FromString, + } + cardinalities = { + 'CreateCluster': cardinality.Cardinality.UNARY_UNARY, + 'CreateInstance': cardinality.Cardinality.UNARY_UNARY, + 'DeleteCluster': cardinality.Cardinality.UNARY_UNARY, + 'DeleteInstance': cardinality.Cardinality.UNARY_UNARY, + 'GetCluster': cardinality.Cardinality.UNARY_UNARY, + 'GetInstance': cardinality.Cardinality.UNARY_UNARY, + 'ListClusters': cardinality.Cardinality.UNARY_UNARY, + 'ListInstances': cardinality.Cardinality.UNARY_UNARY, + 'UpdateCluster': cardinality.Cardinality.UNARY_UNARY, + 'UpdateInstance': cardinality.Cardinality.UNARY_UNARY, + } + stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'google.bigtable.admin.v2.BigtableInstanceAdmin', cardinalities, options=stub_options) # @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated_v2/bigtable_pb2.py b/gcloud/bigtable/_generated_v2/bigtable_pb2.py index 5c9e39dc4e89..ffb5f5fa3eba 100644 --- a/gcloud/bigtable/_generated_v2/bigtable_pb2.py +++ 
b/gcloud/bigtable/_generated_v2/bigtable_pb2.py @@ -804,4 +804,123 @@ DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\026com.google.bigtable.v2B\rBigtableProtoP\001')) +import abc +import six +from grpc.beta import implementations as beta_implementations +from grpc.beta import interfaces as beta_interfaces +from grpc.framework.common import cardinality +from grpc.framework.interfaces.face import utilities as face_utilities + +class BetaBigtableServicer(object): + """""" + def ReadRows(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def SampleRowKeys(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def MutateRow(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def MutateRows(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def CheckAndMutateRow(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ReadModifyWriteRow(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + +class BetaBigtableStub(object): + """The interface to which stubs will conform.""" + def ReadRows(self, request, timeout): + raise NotImplementedError() + def SampleRowKeys(self, request, timeout): + raise NotImplementedError() + def MutateRow(self, request, timeout): + raise NotImplementedError() + MutateRow.future = None + def MutateRows(self, request, timeout): + raise NotImplementedError() + def CheckAndMutateRow(self, request, timeout): + raise NotImplementedError() + CheckAndMutateRow.future = None + def ReadModifyWriteRow(self, request, timeout): + raise NotImplementedError() + ReadModifyWriteRow.future = None + +def beta_create_Bigtable_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import 
google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + request_deserializers = { + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): google.bigtable.v2.bigtable_pb2.CheckAndMutateRowRequest.FromString, + ('google.bigtable.v2.Bigtable', 'MutateRow'): google.bigtable.v2.bigtable_pb2.MutateRowRequest.FromString, + ('google.bigtable.v2.Bigtable', 'MutateRows'): google.bigtable.v2.bigtable_pb2.MutateRowsRequest.FromString, + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): google.bigtable.v2.bigtable_pb2.ReadModifyWriteRowRequest.FromString, + ('google.bigtable.v2.Bigtable', 'ReadRows'): google.bigtable.v2.bigtable_pb2.ReadRowsRequest.FromString, + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): google.bigtable.v2.bigtable_pb2.SampleRowKeysRequest.FromString, + } + response_serializers = { + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): google.bigtable.v2.bigtable_pb2.CheckAndMutateRowResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'MutateRow'): google.bigtable.v2.bigtable_pb2.MutateRowResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'MutateRows'): google.bigtable.v2.bigtable_pb2.MutateRowsResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): google.bigtable.v2.bigtable_pb2.ReadModifyWriteRowResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'ReadRows'): google.bigtable.v2.bigtable_pb2.ReadRowsResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): google.bigtable.v2.bigtable_pb2.SampleRowKeysResponse.SerializeToString, + } + method_implementations = { + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): 
face_utilities.unary_unary_inline(servicer.CheckAndMutateRow), + ('google.bigtable.v2.Bigtable', 'MutateRow'): face_utilities.unary_unary_inline(servicer.MutateRow), + ('google.bigtable.v2.Bigtable', 'MutateRows'): face_utilities.unary_stream_inline(servicer.MutateRows), + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): face_utilities.unary_unary_inline(servicer.ReadModifyWriteRow), + ('google.bigtable.v2.Bigtable', 'ReadRows'): face_utilities.unary_stream_inline(servicer.ReadRows), + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): face_utilities.unary_stream_inline(servicer.SampleRowKeys), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + +def beta_create_Bigtable_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + import google.bigtable.v2.bigtable_pb2 + request_serializers = { + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): google.bigtable.v2.bigtable_pb2.CheckAndMutateRowRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'MutateRow'): google.bigtable.v2.bigtable_pb2.MutateRowRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'MutateRows'): google.bigtable.v2.bigtable_pb2.MutateRowsRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): 
google.bigtable.v2.bigtable_pb2.ReadModifyWriteRowRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'ReadRows'): google.bigtable.v2.bigtable_pb2.ReadRowsRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): google.bigtable.v2.bigtable_pb2.SampleRowKeysRequest.SerializeToString, + } + response_deserializers = { + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): google.bigtable.v2.bigtable_pb2.CheckAndMutateRowResponse.FromString, + ('google.bigtable.v2.Bigtable', 'MutateRow'): google.bigtable.v2.bigtable_pb2.MutateRowResponse.FromString, + ('google.bigtable.v2.Bigtable', 'MutateRows'): google.bigtable.v2.bigtable_pb2.MutateRowsResponse.FromString, + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): google.bigtable.v2.bigtable_pb2.ReadModifyWriteRowResponse.FromString, + ('google.bigtable.v2.Bigtable', 'ReadRows'): google.bigtable.v2.bigtable_pb2.ReadRowsResponse.FromString, + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): google.bigtable.v2.bigtable_pb2.SampleRowKeysResponse.FromString, + } + cardinalities = { + 'CheckAndMutateRow': cardinality.Cardinality.UNARY_UNARY, + 'MutateRow': cardinality.Cardinality.UNARY_UNARY, + 'MutateRows': cardinality.Cardinality.UNARY_STREAM, + 'ReadModifyWriteRow': cardinality.Cardinality.UNARY_UNARY, + 'ReadRows': cardinality.Cardinality.UNARY_STREAM, + 'SampleRowKeys': cardinality.Cardinality.UNARY_STREAM, + } + stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'google.bigtable.v2.Bigtable', cardinalities, options=stub_options) # @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated_v2/bigtable_table_admin_pb2.py b/gcloud/bigtable/_generated_v2/bigtable_table_admin_pb2.py index bd695d5f6b3e..8a884a8b91e3 100644 --- 
a/gcloud/bigtable/_generated_v2/bigtable_table_admin_pb2.py +++ b/gcloud/bigtable/_generated_v2/bigtable_table_admin_pb2.py @@ -504,4 +504,126 @@ DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\027BigtableTableAdminProtoP\001')) +import abc +import six +from grpc.beta import implementations as beta_implementations +from grpc.beta import interfaces as beta_interfaces +from grpc.framework.common import cardinality +from grpc.framework.interfaces.face import utilities as face_utilities + +class BetaBigtableTableAdminServicer(object): + """""" + def CreateTable(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ListTables(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def GetTable(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def DeleteTable(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ModifyColumnFamilies(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def DropRowRange(self, request, context): + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + +class BetaBigtableTableAdminStub(object): + """The interface to which stubs will conform.""" + def CreateTable(self, request, timeout): + raise NotImplementedError() + CreateTable.future = None + def ListTables(self, request, timeout): + raise NotImplementedError() + ListTables.future = None + def GetTable(self, request, timeout): + raise NotImplementedError() + GetTable.future = None + def DeleteTable(self, request, timeout): + raise NotImplementedError() + DeleteTable.future = None + def ModifyColumnFamilies(self, request, timeout): + raise NotImplementedError() + ModifyColumnFamilies.future = None + def DropRowRange(self, request, timeout): + raise NotImplementedError() + DropRowRange.future = None + +def 
beta_create_BigtableTableAdmin_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import google.bigtable.admin.v2.table_pb2 + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import google.bigtable.admin.v2.table_pb2 + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import google.protobuf.empty_pb2 + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import google.bigtable.admin.v2.table_pb2 + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import google.protobuf.empty_pb2 + request_deserializers = { + ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): google.bigtable.admin.v2.bigtable_table_admin_pb2.CreateTableRequest.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): google.bigtable.admin.v2.bigtable_table_admin_pb2.DeleteTableRequest.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): google.bigtable.admin.v2.bigtable_table_admin_pb2.DropRowRangeRequest.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): google.bigtable.admin.v2.bigtable_table_admin_pb2.GetTableRequest.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): google.bigtable.admin.v2.bigtable_table_admin_pb2.ListTablesRequest.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): google.bigtable.admin.v2.bigtable_table_admin_pb2.ModifyColumnFamiliesRequest.FromString, + } + response_serializers = { + ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): google.bigtable.admin.v2.table_pb2.Table.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): google.protobuf.empty_pb2.Empty.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): 
google.protobuf.empty_pb2.Empty.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): google.bigtable.admin.v2.table_pb2.Table.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): google.bigtable.admin.v2.bigtable_table_admin_pb2.ListTablesResponse.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): google.bigtable.admin.v2.table_pb2.Table.SerializeToString, + } + method_implementations = { + ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): face_utilities.unary_unary_inline(servicer.CreateTable), + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): face_utilities.unary_unary_inline(servicer.DeleteTable), + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): face_utilities.unary_unary_inline(servicer.DropRowRange), + ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): face_utilities.unary_unary_inline(servicer.GetTable), + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): face_utilities.unary_unary_inline(servicer.ListTables), + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): face_utilities.unary_unary_inline(servicer.ModifyColumnFamilies), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + +def beta_create_BigtableTableAdmin_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import google.bigtable.admin.v2.table_pb2 + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import 
google.bigtable.admin.v2.table_pb2 + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import google.protobuf.empty_pb2 + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import google.bigtable.admin.v2.table_pb2 + import google.bigtable.admin.v2.bigtable_table_admin_pb2 + import google.protobuf.empty_pb2 + request_serializers = { + ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): google.bigtable.admin.v2.bigtable_table_admin_pb2.CreateTableRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): google.bigtable.admin.v2.bigtable_table_admin_pb2.DeleteTableRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): google.bigtable.admin.v2.bigtable_table_admin_pb2.DropRowRangeRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): google.bigtable.admin.v2.bigtable_table_admin_pb2.GetTableRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): google.bigtable.admin.v2.bigtable_table_admin_pb2.ListTablesRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): google.bigtable.admin.v2.bigtable_table_admin_pb2.ModifyColumnFamiliesRequest.SerializeToString, + } + response_deserializers = { + ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): google.bigtable.admin.v2.table_pb2.Table.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): google.protobuf.empty_pb2.Empty.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): google.protobuf.empty_pb2.Empty.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): google.bigtable.admin.v2.table_pb2.Table.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): google.bigtable.admin.v2.bigtable_table_admin_pb2.ListTablesResponse.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): 
google.bigtable.admin.v2.table_pb2.Table.FromString, + } + cardinalities = { + 'CreateTable': cardinality.Cardinality.UNARY_UNARY, + 'DeleteTable': cardinality.Cardinality.UNARY_UNARY, + 'DropRowRange': cardinality.Cardinality.UNARY_UNARY, + 'GetTable': cardinality.Cardinality.UNARY_UNARY, + 'ListTables': cardinality.Cardinality.UNARY_UNARY, + 'ModifyColumnFamilies': cardinality.Cardinality.UNARY_UNARY, + } + stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'google.bigtable.admin.v2.BigtableTableAdmin', cardinalities, options=stub_options) # @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/client.py b/gcloud/bigtable/client.py index 0b97922894e4..2ec8f6e89bd2 100644 --- a/gcloud/bigtable/client.py +++ b/gcloud/bigtable/client.py @@ -18,8 +18,8 @@ In the hierarchy of API concepts -* a :class:`Client` owns a :class:`.Cluster` -* a :class:`.Cluster` owns a :class:`Table ` +* a :class:`Client` owns a :class:`.Instance` +* a :class:`.Instance` owns a :class:`Table ` * a :class:`Table ` owns a :class:`ColumnFamily <.column_family.ColumnFamily>` * a :class:`Table ` owns a :class:`Row <.row.Row>` @@ -31,52 +31,47 @@ from grpc.beta import implementations -# Cluster admin service is V1-only (V2 provides instance admin instead) -from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as cluster_data_v1_pb2) -from gcloud.bigtable._generated import ( - bigtable_cluster_service_pb2 as cluster_service_v1_pb2) -from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as cluster_messages_v1_pb2) +from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as instance_admin_v2_pb2) # V1 table admin service -from gcloud.bigtable._generated import ( - bigtable_table_service_pb2 as 
table_service_v1_pb2) +from gcloud.bigtable._generated_v2 import ( + bigtable_table_admin_pb2 as table_admin_v2_pb2) # V1 data service -from gcloud.bigtable._generated import ( - bigtable_service_pb2 as data_service_v1_pb2) +from gcloud.bigtable._generated_v2 import ( + bigtable_pb2 as data_v2_pb2) from gcloud.bigtable._generated import ( operations_grpc_pb2 as operations_grpc_v1_pb2) -from gcloud.bigtable.cluster import Cluster +from gcloud.bigtable.instance import Instance from gcloud.client import _ClientFactoryMixin from gcloud.client import _ClientProjectMixin from gcloud.credentials import get_credentials -TABLE_STUB_FACTORY_V1 = ( - table_service_v1_pb2.beta_create_BigtableTableService_stub) -TABLE_ADMIN_HOST_V1 = 'bigtabletableadmin.googleapis.com' +TABLE_STUB_FACTORY_V2 = ( + table_admin_v2_pb2.beta_create_BigtableTableAdmin_stub) +TABLE_ADMIN_HOST_V2 = 'bigtabletableadmin.googleapis.com' """Table Admin API request host.""" -TABLE_ADMIN_PORT_V1 = 443 +TABLE_ADMIN_PORT_V2 = 443 """Table Admin API request port.""" -CLUSTER_STUB_FACTORY_V1 = ( - cluster_service_v1_pb2.beta_create_BigtableClusterService_stub) -CLUSTER_ADMIN_HOST_V1 = 'bigtableclusteradmin.googleapis.com' +INSTANCE_STUB_FACTORY_V2 = ( + instance_admin_v2_pb2.beta_create_BigtableInstanceAdmin_stub) +INSTANCE_ADMIN_HOST_V2 = 'bigtableclusteradmin.googleapis.com' """Cluster Admin API request host.""" -CLUSTER_ADMIN_PORT_V1 = 443 +INSTANCE_ADMIN_PORT_V2 = 443 """Cluster Admin API request port.""" -DATA_STUB_FACTORY_V1 = data_service_v1_pb2.beta_create_BigtableService_stub -DATA_API_HOST_V1 = 'bigtable.googleapis.com' +DATA_STUB_FACTORY_V2 = data_v2_pb2.beta_create_Bigtable_stub +DATA_API_HOST_V2 = 'bigtable.googleapis.com' """Data API request host.""" -DATA_API_PORT_V1 = 443 +DATA_API_PORT_V2 = 443 """Data API request port.""" -OPERATIONS_STUB_FACTORY_V1 = operations_grpc_v1_pb2.beta_create_Operations_stub -OPERATIONS_API_HOST_V1 = CLUSTER_ADMIN_HOST_V1 -OPERATIONS_API_PORT_V1 = 
CLUSTER_ADMIN_PORT_V1 +OPERATIONS_STUB_FACTORY_V2 = operations_grpc_v1_pb2.beta_create_Operations_stub +OPERATIONS_API_HOST_V2 = INSTANCE_ADMIN_HOST_V2 +OPERATIONS_API_PORT_V2 = INSTANCE_ADMIN_PORT_V2 ADMIN_SCOPE = 'https://www.googleapis.com/auth/bigtable.admin' """Scope for interacting with the Cluster Admin and Table Admin APIs.""" @@ -120,7 +115,7 @@ class Client(_ClientFactoryMixin, _ClientProjectMixin): :type admin: bool :param admin: (Optional) Boolean indicating if the client will be used to - interact with the Cluster Admin or Table Admin APIs. This + interact with the Instance Admin or Table Admin APIs. This requires the :const:`ADMIN_SCOPE`. Defaults to :data:`False`. :type user_agent: str @@ -203,7 +198,7 @@ def credentials(self): @property def project_name(self): - """Project name to be used with Cluster Admin API. + """Project name to be used with Instance Admin API. .. note:: @@ -235,7 +230,7 @@ def _data_stub(self): @property def _cluster_stub(self): - """Getter for the gRPC stub used for the Cluster Admin API. + """Getter for the gRPC stub used for the Instance Admin API. :rtype: :class:`grpc.beta._stub._AutoIntermediary` :returns: A gRPC stub object. @@ -287,29 +282,29 @@ def _make_data_stub(self): :rtype: :class:`grpc.beta._stub._AutoIntermediary` :returns: A gRPC stub object. """ - return _make_stub(self, DATA_STUB_FACTORY_V1, - DATA_API_HOST_V1, DATA_API_PORT_V1) + return _make_stub(self, DATA_STUB_FACTORY_V2, + DATA_API_HOST_V2, DATA_API_PORT_V2) def _make_cluster_stub(self): - """Creates gRPC stub to make requests to the Cluster Admin API. + """Creates gRPC stub to make requests to the Instance Admin API. :rtype: :class:`grpc.beta._stub._AutoIntermediary` :returns: A gRPC stub object. 
""" - return _make_stub(self, CLUSTER_STUB_FACTORY_V1, - CLUSTER_ADMIN_HOST_V1, CLUSTER_ADMIN_PORT_V1) + return _make_stub(self, INSTANCE_STUB_FACTORY_V2, + INSTANCE_ADMIN_HOST_V2, INSTANCE_ADMIN_PORT_V2) def _make_operations_stub(self): """Creates gRPC stub to make requests to the Operations API. - These are for long-running operations of the Cluster Admin API, + These are for long-running operations of the Instance Admin API, hence the host and port matching. :rtype: :class:`grpc.beta._stub._AutoIntermediary` :returns: A gRPC stub object. """ - return _make_stub(self, OPERATIONS_STUB_FACTORY_V1, - OPERATIONS_API_HOST_V1, OPERATIONS_API_PORT_V1) + return _make_stub(self, OPERATIONS_STUB_FACTORY_V2, + OPERATIONS_API_HOST_V2, OPERATIONS_API_PORT_V2) def _make_table_stub(self): """Creates gRPC stub to make requests to the Table Admin API. @@ -317,8 +312,8 @@ def _make_table_stub(self): :rtype: :class:`grpc.beta._stub._AutoIntermediary` :returns: A gRPC stub object. """ - return _make_stub(self, TABLE_STUB_FACTORY_V1, - TABLE_ADMIN_HOST_V1, TABLE_ADMIN_PORT_V1) + return _make_stub(self, TABLE_STUB_FACTORY_V2, + TABLE_ADMIN_HOST_V2, TABLE_ADMIN_PORT_V2) def is_started(self): """Check if the client has been started. @@ -380,72 +375,22 @@ def __exit__(self, exc_type, exc_val, exc_t): """Stops the client as a context manager.""" self.stop() - def cluster(self, zone, cluster_id, display_name=None, serve_nodes=3): - """Factory to create a cluster associated with this client. - - :type zone: str - :param zone: The name of the zone where the cluster resides. + def instance(self, instance_id, display_name=None): + """Factory to create a instance associated with this client. - :type cluster_id: str - :param cluster_id: The ID of the cluster. + :type instance_id: str + :param instance_id: The ID of the instance. :type display_name: str - :param display_name: (Optional) The display name for the cluster in the - Cloud Console UI. 
(Must be between 4 and 30 + :param display_name: (Optional) The display name for the instance in + the Cloud Console UI. (Must be between 4 and 30 characters.) If this value is not set in the - constructor, will fall back to the cluster ID. + constructor, will fall back to the instance ID. - :type serve_nodes: int - :param serve_nodes: (Optional) The number of nodes in the cluster. - Defaults to 3. - - :rtype: :class:`.Cluster` - :returns: The cluster owned by this client. - """ - return Cluster(zone, cluster_id, self, - display_name=display_name, serve_nodes=serve_nodes) - - def list_zones(self): - """Lists zones associated with project. - - :rtype: list - :returns: The names (as :class:`str`) of the zones - :raises: :class:`ValueError ` if one of the - zones is not in ``OK`` state. - """ - request_pb = cluster_messages_v1_pb2.ListZonesRequest( - name=self.project_name) - # We expect a `.cluster_messages_v1_pb2.ListZonesResponse` - list_zones_response = self._cluster_stub.ListZones( - request_pb, self.timeout_seconds) - - result = [] - for zone in list_zones_response.zones: - if zone.status != cluster_data_v1_pb2.Zone.OK: - raise ValueError('Zone %s not in OK state' % ( - zone.display_name,)) - result.append(zone.display_name) - return result - - def list_clusters(self): - """Lists clusters owned by the project. - - :rtype: tuple - :returns: A pair of results, the first is a list of :class:`.Cluster` s - returned and the second is a list of strings (the failed - zones in the request). + :rtype: :class:`.Instance` + :returns: an instance owned by this client. 
""" - request_pb = cluster_messages_v1_pb2.ListClustersRequest( - name=self.project_name) - # We expect a `.cluster_messages_v1_pb2.ListClustersResponse` - list_clusters_response = self._cluster_stub.ListClusters( - request_pb, self.timeout_seconds) - - failed_zones = [zone.display_name - for zone in list_clusters_response.failed_zones] - clusters = [Cluster.from_pb(cluster_pb, self) - for cluster_pb in list_clusters_response.clusters] - return clusters, failed_zones + return Instance(instance_id, self, display_name=display_name) class _MetadataPlugin(object): diff --git a/gcloud/bigtable/cluster.py b/gcloud/bigtable/cluster.py index 28875730c292..a65a5daa945a 100644 --- a/gcloud/bigtable/cluster.py +++ b/gcloud/bigtable/cluster.py @@ -19,31 +19,21 @@ from google.longrunning import operations_pb2 -from gcloud._helpers import _pb_timestamp_to_datetime -from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) -from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_v1_pb2) -from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as table_messages_v1_pb2) -from gcloud.bigtable.table import Table +from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) +from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) _CLUSTER_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' - r'zones/(?P[^/]+)/clusters/' + r'instances/(?P[^/]+)/clusters/' r'(?P[a-z][-a-z0-9]*)$') -_OPERATION_NAME_RE = re.compile(r'^operations/projects/([^/]+)/zones/([^/]+)/' - r'clusters/([a-z][-a-z0-9]*)/operations/' - r'(?P\d+)$') -_TYPE_URL_BASE = 'type.googleapis.com/google.bigtable.' -_ADMIN_TYPE_URL_BASE = _TYPE_URL_BASE + 'admin.cluster.v1.' 
-_CLUSTER_CREATE_METADATA = _ADMIN_TYPE_URL_BASE + 'CreateClusterMetadata' -_UPDATE_CREATE_METADATA = _ADMIN_TYPE_URL_BASE + 'UpdateClusterMetadata' -_UNDELETE_CREATE_METADATA = _ADMIN_TYPE_URL_BASE + 'UndeleteClusterMetadata' +_OPERATION_NAME_RE = re.compile(r'^operations/' + r'projects/([^/]+)/' + r'instances/([^/]+)/' + r'clusters/([a-z][-a-z0-9]*)/' + r'operations/(?P\d+)$') _TYPE_URL_MAP = { - _CLUSTER_CREATE_METADATA: messages_v1_pb2.CreateClusterMetadata, - _UPDATE_CREATE_METADATA: messages_v1_pb2.UpdateClusterMetadata, - _UNDELETE_CREATE_METADATA: messages_v1_pb2.UndeleteClusterMetadata, } DEFAULT_SERVE_NODES = 3 @@ -56,16 +46,13 @@ def _prepare_create_request(cluster): :type cluster: :class:`Cluster` :param cluster: The cluster to be created. - :rtype: :class:`.messages_v1_pb2.CreateClusterRequest` + :rtype: :class:`.messages_v2_pb2.CreateClusterRequest` :returns: The CreateCluster request object containing the cluster info. """ - zone_full_name = ('projects/' + cluster._client.project + - '/zones/' + cluster.zone) - return messages_v1_pb2.CreateClusterRequest( - name=zone_full_name, + return messages_v2_pb2.CreateClusterRequest( + name=cluster._instance.name, cluster_id=cluster.cluster_id, - cluster=data_v1_pb2.Cluster( - display_name=cluster.display_name, + cluster=data_v2_pb2.Cluster( serve_nodes=cluster.serve_nodes, ), ) @@ -101,9 +88,7 @@ def _process_operation(operation_pb): Create/Update/Undelete cluster request. :rtype: tuple - :returns: A pair of an integer and datetime stamp. The integer is the ID - of the operation (``operation_id``) and the timestamp when - the create operation began (``operation_begin``). + :returns: integer ID of the operation (``operation_id``). :raises: :class:`ValueError ` if the operation name doesn't match the :data:`_OPERATION_NAME_RE` regex. 
""" @@ -114,11 +99,7 @@ def _process_operation(operation_pb): operation_pb.name) operation_id = int(match.group('operation_id')) - request_metadata = _parse_pb_any_to_native(operation_pb.metadata) - operation_begin = _pb_timestamp_to_datetime( - request_metadata.request_time) - - return operation_id, operation_begin + return operation_id class Operation(object): @@ -134,17 +115,13 @@ class Operation(object): :type op_id: int :param op_id: The ID of the operation. - :type begin: :class:`datetime.datetime` - :param begin: The time when the operation was started. - :type cluster: :class:`Cluster` :param cluster: The cluster that created the operation. """ - def __init__(self, op_type, op_id, begin, cluster=None): + def __init__(self, op_type, op_id, cluster=None): self.op_type = op_type self.op_id = op_id - self.begin = begin self._cluster = cluster self._complete = False @@ -153,7 +130,6 @@ def __eq__(self, other): return False return (other.op_type == self.op_type and other.op_id == self.op_id and - other.begin == self.begin and other._cluster == self._cluster and other._complete == self._complete) @@ -175,8 +151,9 @@ def finished(self): '/operations/%d' % (self.op_id,)) request_pb = operations_pb2.GetOperationRequest(name=operation_name) # We expect a `google.longrunning.operations_pb2.Operation`. - operation_pb = self._cluster._client._operations_stub.GetOperation( - request_pb, self._cluster._client.timeout_seconds) + client = self._cluster._instance._client + operation_pb = client._operations_stub.GetOperation( + request_pb, client.timeout_seconds) if operation_pb.done: self._complete = True @@ -199,87 +176,67 @@ class Cluster(object): .. note:: For now, we leave out the ``default_storage_type`` (an enum) - which if not sent will end up as :data:`.data_v1_pb2.STORAGE_SSD`. - - :type zone: str - :param zone: The name of the zone where the cluster resides. + which if not sent will end up as :data:`.data_v2_pb2.STORAGE_SSD`. 
:type cluster_id: str :param cluster_id: The ID of the cluster. - :type client: :class:`Client ` - :param client: The client that owns the cluster. Provides - authorization and a project ID. - - :type display_name: str - :param display_name: (Optional) The display name for the cluster in the - Cloud Console UI. (Must be between 4 and 30 - characters.) If this value is not set in the - constructor, will fall back to the cluster ID. + :type instance: :class:`.instance.Instance` + :param instance: The instance where the cluster resides. :type serve_nodes: int :param serve_nodes: (Optional) The number of nodes in the cluster. Defaults to :data:`DEFAULT_SERVE_NODES`. """ - def __init__(self, zone, cluster_id, client, - display_name=None, serve_nodes=DEFAULT_SERVE_NODES): - self.zone = zone + def __init__(self, cluster_id, instance, + serve_nodes=DEFAULT_SERVE_NODES): self.cluster_id = cluster_id - self.display_name = display_name or cluster_id + self._instance = instance self.serve_nodes = serve_nodes - self._client = client - - def table(self, table_id): - """Factory to create a table associated with this cluster. - - :type table_id: str - :param table_id: The ID of the table. - - :rtype: :class:`Table ` - :returns: The table owned by this cluster. - """ - return Table(table_id, self) + self.location = None def _update_from_pb(self, cluster_pb): """Refresh self from the server-provided protobuf. Helper for :meth:`from_pb` and :meth:`reload`. """ - if not cluster_pb.display_name: # Simple field (string) - raise ValueError('Cluster protobuf does not contain display_name') if not cluster_pb.serve_nodes: # Simple field (int32) raise ValueError('Cluster protobuf does not contain serve_nodes') - self.display_name = cluster_pb.display_name self.serve_nodes = cluster_pb.serve_nodes + self.location = cluster_pb.location @classmethod - def from_pb(cls, cluster_pb, client): + def from_pb(cls, cluster_pb, instance): """Creates a cluster instance from a protobuf. 
:type cluster_pb: :class:`bigtable_cluster_data_pb2.Cluster` :param cluster_pb: A cluster protobuf object. - :type client: :class:`Client ` - :param client: The client that owns the cluster. + :type instance: :class:`.instance.Instance` + :param instance: The instance that owns the cluster. :rtype: :class:`Cluster` :returns: The cluster parsed from the protobuf response. - :raises: :class:`ValueError ` if the cluster - name does not match - ``projects/{project}/zones/{zone}/clusters/{cluster_id}`` - or if the parsed project ID does not match the project ID - on the client. + :raises: + :class:`ValueError ` if the cluster + name does not match + ``projects/{project}/instances/{instance}/clusters/{cluster_id}`` + or if the parsed project ID does not match the project ID + on the client. """ match = _CLUSTER_NAME_RE.match(cluster_pb.name) if match is None: raise ValueError('Cluster protobuf name was not in the ' 'expected format.', cluster_pb.name) - if match.group('project') != client.project: + if match.group('project') != instance._client.project: raise ValueError('Project ID on cluster does not match the ' 'project ID on the client') + if match.group('instance') != instance.instance_id: + raise ValueError('Instance ID on cluster does not match the ' + 'instance ID on the client') - result = cls(match.group('zone'), match.group('cluster_id'), client) + result = cls(match.group('cluster_id'), instance) result._update_from_pb(cluster_pb) return result @@ -292,9 +249,8 @@ def copy(self): :rtype: :class:`.Cluster` :returns: A copy of the current cluster. """ - new_client = self._client.copy() - return self.__class__(self.zone, self.cluster_id, new_client, - display_name=self.display_name, + new_instance = self._instance.copy() + return self.__class__(self.cluster_id, new_instance, serve_nodes=self.serve_nodes) @property @@ -302,43 +258,41 @@ def name(self): """Cluster name used in requests. .. 
note:: - This property will not change if ``zone`` and ``cluster_id`` do not, - but the return value is not cached. + This property will not change if ``_instance`` and ``cluster_id`` + do not, but the return value is not cached. The cluster name is of the form - ``"projects/{project}/zones/{zone}/clusters/{cluster_id}"`` + ``"projects/{project}/instances/{instance}/clusters/{cluster_id}"`` :rtype: str :returns: The cluster name. """ - return (self._client.project_name + '/zones/' + self.zone + - '/clusters/' + self.cluster_id) + return self._instance.name + '/clusters/' + self.cluster_id def __eq__(self, other): if not isinstance(other, self.__class__): return False # NOTE: This does not compare the configuration values, such as - # the serve_nodes or display_name. Instead, it only compares - # identifying values zone, cluster ID and client. This is + # the serve_nodes. Instead, it only compares + # identifying values instance, cluster ID and client. This is # intentional, since the same cluster can be in different states - # if not synchronized. Clusters with similar zone/cluster + # if not synchronized. Clusters with similar instance/cluster # settings but different clients can't be used in the same way. - return (other.zone == self.zone and - other.cluster_id == self.cluster_id and - other._client == self._client) + return (other.cluster_id == self.cluster_id and + other._instance == self._instance) def __ne__(self, other): return not self.__eq__(other) def reload(self): """Reload the metadata for this cluster.""" - request_pb = messages_v1_pb2.GetClusterRequest(name=self.name) + request_pb = messages_v2_pb2.GetClusterRequest(name=self.name) # We expect a `._generated.bigtable_cluster_data_pb2.Cluster`. 
- cluster_pb = self._client._cluster_stub.GetCluster( - request_pb, self._client.timeout_seconds) + cluster_pb = self._instance._client._cluster_stub.GetCluster( + request_pb, self._instance._client.timeout_seconds) - # NOTE: _update_from_pb does not check that the project, zone and + # NOTE: _update_from_pb does not check that the project, instance and # cluster ID on the response match the request. self._update_from_pb(cluster_pb) @@ -347,14 +301,13 @@ def create(self): .. note:: - Uses the ``project``, ``zone`` and ``cluster_id`` on the current - :class:`Cluster` in addition to the ``display_name`` and - ``serve_nodes``. If you'd like to change them before creating, - reset the values via + Uses the ``project``, ``instance`` and ``cluster_id`` on the + current :class:`Cluster` in addition to the ``serve_nodes``. + To change them before creating, reset the values via .. code:: python - cluster.display_name = 'New display name' + cluster.serve_nodes = 8 cluster.cluster_id = 'i-changed-my-mind' before calling :meth:`create`. @@ -365,24 +318,23 @@ def create(self): """ request_pb = _prepare_create_request(self) # We expect a `google.longrunning.operations_pb2.Operation`. - cluster_pb = self._client._cluster_stub.CreateCluster( - request_pb, self._client.timeout_seconds) + operation_pb = self._instance._client._cluster_stub.CreateCluster( + request_pb, self._instance._client.timeout_seconds) - op_id, op_begin = _process_operation(cluster_pb.current_operation) - return Operation('create', op_id, op_begin, cluster=self) + op_id = _process_operation(operation_pb) + return Operation('create', op_id, cluster=self) def update(self): """Update this cluster. .. note:: - Updates the ``display_name`` and ``serve_nodes``. If you'd like to + Updates the ``serve_nodes``. If you'd like to change them before updating, reset the values via .. code:: python - cluster.display_name = 'New display name' - cluster.serve_nodes = 3 + cluster.serve_nodes = 8 before calling :meth:`update`. 
@@ -390,17 +342,16 @@ def update(self): :returns: The long-running operation corresponding to the update operation. """ - request_pb = data_v1_pb2.Cluster( + request_pb = data_v2_pb2.Cluster( name=self.name, - display_name=self.display_name, serve_nodes=self.serve_nodes, ) - # We expect a `._generated.bigtable_cluster_data_pb2.Cluster`. - cluster_pb = self._client._cluster_stub.UpdateCluster( - request_pb, self._client.timeout_seconds) + # Ignore expected `._generated.bigtable_cluster_data_pb2.Cluster`. + operation_pb = self._instance._client._cluster_stub.UpdateCluster( + request_pb, self._instance._client.timeout_seconds) - op_id, op_begin = _process_operation(cluster_pb.current_operation) - return Operation('update', op_id, op_begin, cluster=self) + op_id = _process_operation(operation_pb) + return Operation('update', op_id, cluster=self) def delete(self): """Delete this cluster. @@ -427,64 +378,7 @@ def delete(self): irrevocably disappear from the API, and their data will be permanently deleted. """ - request_pb = messages_v1_pb2.DeleteClusterRequest(name=self.name) + request_pb = messages_v2_pb2.DeleteClusterRequest(name=self.name) # We expect a `google.protobuf.empty_pb2.Empty` - self._client._cluster_stub.DeleteCluster( - request_pb, self._client.timeout_seconds) - - def undelete(self): - """Undelete this cluster. - - Cancels the scheduled deletion of an cluster and begins preparing it to - resume serving. The returned operation will also be embedded as the - cluster's ``current_operation``. - - Immediately upon completion of this request: - - * The cluster's ``delete_time`` field will be unset, protecting it from - automatic deletion. - - Until completion of the returned operation: - - * The operation cannot be cancelled. - - Upon completion of the returned operation: - - * Billing for the cluster's resources will resume. - * All tables within the cluster will be available. 
- - :rtype: :class:`Operation` - :returns: The long-running operation corresponding to the - undelete operation. - """ - request_pb = messages_v1_pb2.UndeleteClusterRequest(name=self.name) - # We expect a `google.longrunning.operations_pb2.Operation`. - operation_pb2 = self._client._cluster_stub.UndeleteCluster( - request_pb, self._client.timeout_seconds) - - op_id, op_begin = _process_operation(operation_pb2) - return Operation('undelete', op_id, op_begin, cluster=self) - - def list_tables(self): - """List the tables in this cluster. - - :rtype: list of :class:`Table ` - :returns: The list of tables owned by the cluster. - :raises: :class:`ValueError ` if one of the - returned tables has a name that is not of the expected format. - """ - request_pb = table_messages_v1_pb2.ListTablesRequest(name=self.name) - # We expect a `table_messages_v1_pb2.ListTablesResponse` - table_list_pb = self._client._table_stub.ListTables( - request_pb, self._client.timeout_seconds) - - result = [] - for table_pb in table_list_pb.tables: - table_prefix = self.name + '/tables/' - if not table_pb.name.startswith(table_prefix): - raise ValueError('Table name %s not of expected format' % ( - table_pb.name,)) - table_id = table_pb.name[len(table_prefix):] - result.append(self.table(table_id)) - - return result + self._instance._client._cluster_stub.DeleteCluster( + request_pb, self._instance._client.timeout_seconds) diff --git a/gcloud/bigtable/instance.py b/gcloud/bigtable/instance.py new file mode 100644 index 000000000000..ba29e43a57c2 --- /dev/null +++ b/gcloud/bigtable/instance.py @@ -0,0 +1,459 @@ +# Copyright 2015 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""User friendly container for Google Cloud Bigtable Instance.""" + + +import re + +from google.longrunning import operations_pb2 + +from gcloud._helpers import _pb_timestamp_to_datetime +from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) +from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) +from gcloud.bigtable._generated_v2 import ( + bigtable_table_admin_pb2 as table_messages_v2_pb2) +from gcloud.bigtable.cluster import Cluster +from gcloud.bigtable.table import Table + + +_INSTANCE_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' + r'instances/(?P[a-z][-a-z0-9]*)$') +_OPERATION_NAME_RE = re.compile(r'^operations/projects/([^/]+)/' + r'instances/([a-z][-a-z0-9]*)/operations/' + r'(?P\d+)$') +_TYPE_URL_BASE = 'type.googleapis.com/google.bigtable.' +_ADMIN_TYPE_URL_BASE = _TYPE_URL_BASE + 'admin.v2.' +_INSTANCE_CREATE_METADATA = _ADMIN_TYPE_URL_BASE + 'CreateInstanceMetadata' +_TYPE_URL_MAP = { + _INSTANCE_CREATE_METADATA: messages_v2_pb2.CreateInstanceMetadata, +} + + +def _prepare_create_request(instance): + """Creates a protobuf request for a CreateInstance request. + + :type instance: :class:`Instance` + :param instance: The instance to be created. + + :rtype: :class:`.messages_v2_pb2.CreateInstanceRequest` + :returns: The CreateInstance request object containing the instance info. 
+ """ + parent_name = ('projects/' + instance._client.project) + return messages_v2_pb2.CreateInstanceRequest( + name=parent_name, + instance_id=instance.instance_id, + instance=data_v2_pb2.Instance( + display_name=instance.display_name, + ), + ) + + +def _parse_pb_any_to_native(any_val, expected_type=None): + """Convert a serialized "google.protobuf.Any" value to actual type. + + :type any_val: :class:`google.protobuf.any_pb2.Any` + :param any_val: A serialized protobuf value container. + + :type expected_type: str + :param expected_type: (Optional) The type URL we expect ``any_val`` + to have. + + :rtype: object + :returns: The de-serialized object. + :raises: :class:`ValueError ` if the + ``expected_type`` does not match the ``type_url`` on the input. + """ + if expected_type is not None and expected_type != any_val.type_url: + raise ValueError('Expected type: %s, Received: %s' % ( + expected_type, any_val.type_url)) + container_class = _TYPE_URL_MAP[any_val.type_url] + return container_class.FromString(any_val.value) + + +def _process_operation(operation_pb): + """Processes a create protobuf response. + + :type operation_pb: :class:`google.longrunning.operations_pb2.Operation` + :param operation_pb: The long-running operation response from a + Create/Update/Undelete instance request. + + :rtype: tuple + :returns: A pair of an integer and datetime stamp. The integer is the ID + of the operation (``operation_id``) and the timestamp when + the create operation began (``operation_begin``). + :raises: :class:`ValueError ` if the operation name + doesn't match the :data:`_OPERATION_NAME_RE` regex. 
+ """ + match = _OPERATION_NAME_RE.match(operation_pb.name) + if match is None: + raise ValueError('Operation name was not in the expected ' + 'format after a instance modification.', + operation_pb.name) + operation_id = int(match.group('operation_id')) + + request_metadata = _parse_pb_any_to_native(operation_pb.metadata) + operation_begin = _pb_timestamp_to_datetime( + request_metadata.request_time) + + return operation_id, operation_begin + + +class Operation(object): + """Representation of a Google API Long-Running Operation. + + In particular, these will be the result of operations on + instances using the Cloud Bigtable API. + + :type op_type: str + :param op_type: The type of operation being performed. Expect + ``create``, ``update`` or ``undelete``. + + :type op_id: int + :param op_id: The ID of the operation. + + :type begin: :class:`datetime.datetime` + :param begin: The time when the operation was started. + + :type instance: :class:`Instance` + :param instance: The instance that created the operation. + """ + + def __init__(self, op_type, op_id, begin, instance=None): + self.op_type = op_type + self.op_id = op_id + self.begin = begin + self._instance = instance + self._complete = False + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return (other.op_type == self.op_type and + other.op_id == self.op_id and + other.begin == self.begin and + other._instance == self._instance and + other._complete == self._complete) + + def __ne__(self, other): + return not self.__eq__(other) + + def finished(self): + """Check if the operation has finished. + + :rtype: bool + :returns: A boolean indicating if the current operation has completed. + :raises: :class:`ValueError ` if the operation + has already completed. 
+ """ + if self._complete: + raise ValueError('The operation has completed.') + + operation_name = ('operations/' + self._instance.name + + '/operations/%d' % (self.op_id,)) + request_pb = operations_pb2.GetOperationRequest(name=operation_name) + # We expect a `google.longrunning.operations_pb2.Operation`. + operation_pb = self._instance._client._operations_stub.GetOperation( + request_pb, self._instance._client.timeout_seconds) + + if operation_pb.done: + self._complete = True + return True + else: + return False + + +class Instance(object): + """Representation of a Google Cloud Bigtable Instance. + + We can use a :class:`Instance` to: + + * :meth:`reload` itself + * :meth:`create` itself + * :meth:`update` itself + * :meth:`delete` itself + * :meth:`undelete` itself + + .. note:: + + For now, we leave out the ``default_storage_type`` (an enum) + which if not sent will end up as :data:`.data_v2_pb2.STORAGE_SSD`. + + :type instance_id: str + :param instance_id: The ID of the instance. + + :type client: :class:`Client ` + :param client: The client that owns the instance. Provides + authorization and a project ID. + + :type display_name: str + :param display_name: (Optional) The display name for the instance in the + Cloud Console UI. (Must be between 4 and 30 + characters.) If this value is not set in the + constructor, will fall back to the instance ID. + """ + + def __init__(self, instance_id, client, + display_name=None): + self.instance_id = instance_id + self.display_name = display_name or instance_id + self._client = client + + def _update_from_pb(self, instance_pb): + """Refresh self from the server-provided protobuf. + + Helper for :meth:`from_pb` and :meth:`reload`. + """ + if not instance_pb.display_name: # Simple field (string) + raise ValueError('Instance protobuf does not contain display_name') + self.display_name = instance_pb.display_name + + @classmethod + def from_pb(cls, instance_pb, client): + """Creates a instance instance from a protobuf. 
+ + :type instance_pb: :class:`instance_pb2.Instance` + :param instance_pb: A instance protobuf object. + + :type client: :class:`Client ` + :param client: The client that owns the instance. + + :rtype: :class:`Instance` + :returns: The instance parsed from the protobuf response. + :raises: :class:`ValueError ` if the instance + name does not match + ``projects/{project}/instances/{instance_id}`` + or if the parsed project ID does not match the project ID + on the client. + """ + match = _INSTANCE_NAME_RE.match(instance_pb.name) + if match is None: + raise ValueError('Instance protobuf name was not in the ' + 'expected format.', instance_pb.name) + if match.group('project') != client.project: + raise ValueError('Project ID on instance does not match the ' + 'project ID on the client') + + result = cls(match.group('instance_id'), client) + result._update_from_pb(instance_pb) + return result + + def copy(self): + """Make a copy of this instance. + + Copies the local data stored as simple types and copies the client + attached to this instance. + + :rtype: :class:`.Instance` + :returns: A copy of the current instance. + """ + new_client = self._client.copy() + return self.__class__(self.instance_id, new_client, + display_name=self.display_name) + + @property + def name(self): + """Instance name used in requests. + + .. note:: + This property will not change if ``instance_id`` does not, + but the return value is not cached. + + The instance name is of the form + + ``"projects/{project}/instances/{instance_id}"`` + + :rtype: str + :returns: The instance name. + """ + return self._client.project_name + '/instances/' + self.instance_id + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + # NOTE: This does not compare the configuration values, such as + # the display_name. Instead, it only compares + # identifying values instance ID and client. 
This is + # intentional, since the same instance can be in different states + # if not synchronized. Instances with similar instance + # settings but different clients can't be used in the same way. + return (other.instance_id == self.instance_id and + other._client == self._client) + + def __ne__(self, other): + return not self.__eq__(other) + + def reload(self): + """Reload the metadata for this instance.""" + request_pb = messages_v2_pb2.GetInstanceRequest(name=self.name) + # We expect `data_v2_pb2.Instance`. + instance_pb = self._client._instance_stub.GetInstance( + request_pb, self._client.timeout_seconds) + + # NOTE: _update_from_pb does not check that the project and + # instance ID on the response match the request. + self._update_from_pb(instance_pb) + + def create(self): + """Create this instance. + + .. note:: + + Uses the ``project`` and ``instance_id`` on the current + :class:`Instance` in addition to the ``display_name``. + To change them before creating, reset the values via + + .. code:: python + + instance.display_name = 'New display name' + instance.instance_id = 'i-changed-my-mind' + + before calling :meth:`create`. + + :rtype: :class:`Operation` + :returns: The long-running operation corresponding to the + create operation. + """ + request_pb = _prepare_create_request(self) + # We expect a `google.longrunning.operations_pb2.Operation`. + operation_pb = self._client._instance_stub.CreateInstance( + request_pb, self._client.timeout_seconds) + + op_id, op_begin = _process_operation(operation_pb) + return Operation('create', op_id, op_begin, instance=self) + + def update(self): + """Update this instance. + + .. note:: + + Updates the ``display_name``. To change that value before + updating, reset its values via + + .. code:: python + + instance.display_name = 'New display name' + + before calling :meth:`update`. 
+ """ + request_pb = data_v2_pb2.Instance( + name=self.name, + display_name=self.display_name, + ) + # Ignore the expected `data_v2_pb2.Instance`. + self._client._instance_stub.UpdateInstance( + request_pb, self._client.timeout_seconds) + + def delete(self): + """Delete this instance. + + Marks a instance and all of its tables for permanent deletion + in 7 days. + + Immediately upon completion of the request: + + * Billing will cease for all of the instance's reserved resources. + * The instance's ``delete_time`` field will be set 7 days in + the future. + + Soon afterward: + + * All tables within the instance will become unavailable. + + Prior to the instance's ``delete_time``: + + * The instance can be recovered with a call to ``UndeleteInstance``. + * All other attempts to modify or delete the instance will be rejected. + + At the instance's ``delete_time``: + + * The instance and **all of its tables** will immediately and + irrevocably disappear from the API, and their data will be + permanently deleted. + """ + request_pb = messages_v2_pb2.DeleteInstanceRequest(name=self.name) + # We expect a `google.protobuf.empty_pb2.Empty` + self._client._instance_stub.DeleteInstance( + request_pb, self._client.timeout_seconds) + + def cluster(self, cluster_id, serve_nodes=3): + """Factory to create a cluster associated with this client. + + :type cluster_id: str + :param cluster_id: The ID of the cluster. + + :type serve_nodes: int + :param serve_nodes: (Optional) The number of nodes in the cluster. + Defaults to 3. + + :rtype: :class:`.Cluster` + :returns: The cluster owned by this client. + """ + return Cluster(cluster_id, self, serve_nodes=serve_nodes) + + def list_clusters(self): + """Lists clusters in this instance. + + :rtype: tuple + :returns: A pair of results, the first is a list of :class:`.Cluster` s + returned and the second is a list of strings (the failed + locations in the request). 
+ """ + request_pb = messages_v2_pb2.ListClustersRequest(name=self.name) + # We expect a `.cluster_messages_v1_pb2.ListClustersResponse` + list_clusters_response = self._client._instance_stub.ListClusters( + request_pb, self._client.timeout_seconds) + + failed_locations = [ + location for location in list_clusters_response.failed_locations] + clusters = [Cluster.from_pb(cluster_pb, self) + for cluster_pb in list_clusters_response.clusters] + return clusters, failed_locations + + def table(self, table_id): + """Factory to create a table associated with this instance. + + :type table_id: str + :param table_id: The ID of the table. + + :rtype: :class:`Table ` + :returns: The table owned by this instance. + """ + return Table(table_id, self) + + def list_tables(self): + """List the tables in this instance. + + :rtype: list of :class:`Table ` + :returns: The list of tables owned by the instance. + :raises: :class:`ValueError ` if one of the + returned tables has a name that is not of the expected format. + """ + request_pb = table_messages_v2_pb2.ListTablesRequest(name=self.name) + # We expect a `table_messages_v2_pb2.ListTablesResponse` + table_list_pb = self._client._table_stub.ListTables( + request_pb, self._client.timeout_seconds) + + result = [] + for table_pb in table_list_pb.tables: + table_prefix = self.name + '/tables/' + if not table_pb.name.startswith(table_prefix): + raise ValueError('Table name %s not of expected format' % ( + table_pb.name,)) + table_id = table_pb.name[len(table_prefix):] + result.append(self.table(table_id)) + + return result diff --git a/gcloud/bigtable/table.py b/gcloud/bigtable/table.py index 83182d9f2a04..159fc4566c42 100644 --- a/gcloud/bigtable/table.py +++ b/gcloud/bigtable/table.py @@ -52,13 +52,13 @@ class Table(object): :type table_id: str :param table_id: The ID of the table. - :type cluster: :class:`Cluster <.cluster.Cluster>` - :param cluster: The cluster that owns the table. 
+ :type instance: :class:`Cluster <.instance.Instance>` + :param instance: The instance that owns the table. """ - def __init__(self, table_id, cluster): + def __init__(self, table_id, instance): self.table_id = table_id - self._cluster = cluster + self._instance = instance @property def name(self): @@ -76,7 +76,7 @@ def name(self): :rtype: str :returns: The table name. """ - return self._cluster.name + '/tables/' + self.table_id + return self._instance.name + '/tables/' + self.table_id def column_family(self, column_family_id, gc_rule=None): """Factory to create a column family associated with this table. @@ -131,7 +131,7 @@ def __eq__(self, other): if not isinstance(other, self.__class__): return False return (other.table_id == self.table_id and - other._cluster == self._cluster) + other._instance == self._instance) def __ne__(self, other): return not self.__eq__(other) @@ -170,10 +170,10 @@ def create(self, initial_split_keys=None): split_pb(key=key) for key in initial_split_keys] request_pb = table_admin_messages_v2_pb2.CreateTableRequest( initial_splits=initial_split_keys or [], - name=self._cluster.name, + name=self._instance.name, table_id=self.table_id, ) - client = self._cluster._client + client = self._instance._client # We expect a `._generated.bigtable_table_data_pb2.Table` client._table_stub.CreateTable(request_pb, client.timeout_seconds) @@ -181,7 +181,7 @@ def delete(self): """Delete this table.""" request_pb = table_admin_messages_v2_pb2.DeleteTableRequest( name=self.name) - client = self._cluster._client + client = self._instance._client # We expect a `google.protobuf.empty_pb2.Empty` client._table_stub.DeleteTable(request_pb, client.timeout_seconds) @@ -198,7 +198,7 @@ def list_column_families(self): """ request_pb = table_admin_messages_v2_pb2.GetTableRequest( name=self.name) - client = self._cluster._client + client = self._instance._client # We expect a `._generated.bigtable_table_data_pb2.Table` table_pb = 
client._table_stub.GetTable(request_pb, client.timeout_seconds) @@ -229,7 +229,7 @@ def read_row(self, row_key, filter_=None): """ request_pb = _create_row_request(self.name, row_key=row_key, filter_=filter_) - client = self._cluster._client + client = self._instance._client response_iterator = client._data_stub.ReadRows(request_pb, client.timeout_seconds) rows_data = PartialRowsData(response_iterator) @@ -273,7 +273,7 @@ def read_rows(self, start_key=None, end_key=None, limit=None, request_pb = _create_row_request( self.name, start_key=start_key, end_key=end_key, filter_=filter_, limit=limit) - client = self._cluster._client + client = self._instance._client response_iterator = client._data_stub.ReadRows(request_pb, client.timeout_seconds) # We expect an iterator of `data_messages_v2_pb2.ReadRowsResponse` @@ -312,7 +312,7 @@ def sample_row_keys(self): """ request_pb = data_messages_v2_pb2.SampleRowKeysRequest( table_name=self.name) - client = self._cluster._client + client = self._instance._client response_iterator = client._data_stub.SampleRowKeys( request_pb, client.timeout_seconds) return response_iterator diff --git a/gcloud/bigtable/test_client.py b/gcloud/bigtable/test_client.py index ccd276fdb750..eeff14a5aab4 100644 --- a/gcloud/bigtable/test_client.py +++ b/gcloud/bigtable/test_client.py @@ -289,9 +289,9 @@ def test_table_stub_unset_failure(self): def test__make_data_stub(self): from gcloud._testing import _Monkey from gcloud.bigtable import client as MUT - from gcloud.bigtable.client import DATA_API_HOST_V1 - from gcloud.bigtable.client import DATA_API_PORT_V1 - from gcloud.bigtable.client import DATA_STUB_FACTORY_V1 + from gcloud.bigtable.client import DATA_API_HOST_V2 + from gcloud.bigtable.client import DATA_API_PORT_V2 + from gcloud.bigtable.client import DATA_STUB_FACTORY_V2 credentials = _Credentials() project = 'PROJECT' @@ -311,18 +311,18 @@ def mock_make_stub(*args): self.assertEqual(make_stub_args, [ ( client, - DATA_STUB_FACTORY_V1, - 
DATA_API_HOST_V1, - DATA_API_PORT_V1, + DATA_STUB_FACTORY_V2, + DATA_API_HOST_V2, + DATA_API_PORT_V2, ), ]) def test__make_cluster_stub(self): from gcloud._testing import _Monkey from gcloud.bigtable import client as MUT - from gcloud.bigtable.client import CLUSTER_ADMIN_HOST_V1 - from gcloud.bigtable.client import CLUSTER_ADMIN_PORT_V1 - from gcloud.bigtable.client import CLUSTER_STUB_FACTORY_V1 + from gcloud.bigtable.client import INSTANCE_ADMIN_HOST_V2 + from gcloud.bigtable.client import INSTANCE_ADMIN_PORT_V2 + from gcloud.bigtable.client import INSTANCE_STUB_FACTORY_V2 credentials = _Credentials() project = 'PROJECT' @@ -342,18 +342,18 @@ def mock_make_stub(*args): self.assertEqual(make_stub_args, [ ( client, - CLUSTER_STUB_FACTORY_V1, - CLUSTER_ADMIN_HOST_V1, - CLUSTER_ADMIN_PORT_V1, + INSTANCE_STUB_FACTORY_V2, + INSTANCE_ADMIN_HOST_V2, + INSTANCE_ADMIN_PORT_V2, ), ]) def test__make_operations_stub(self): from gcloud._testing import _Monkey from gcloud.bigtable import client as MUT - from gcloud.bigtable.client import OPERATIONS_API_HOST_V1 - from gcloud.bigtable.client import OPERATIONS_API_PORT_V1 - from gcloud.bigtable.client import OPERATIONS_STUB_FACTORY_V1 + from gcloud.bigtable.client import OPERATIONS_API_HOST_V2 + from gcloud.bigtable.client import OPERATIONS_API_PORT_V2 + from gcloud.bigtable.client import OPERATIONS_STUB_FACTORY_V2 credentials = _Credentials() project = 'PROJECT' @@ -373,18 +373,18 @@ def mock_make_stub(*args): self.assertEqual(make_stub_args, [ ( client, - OPERATIONS_STUB_FACTORY_V1, - OPERATIONS_API_HOST_V1, - OPERATIONS_API_PORT_V1, + OPERATIONS_STUB_FACTORY_V2, + OPERATIONS_API_HOST_V2, + OPERATIONS_API_PORT_V2, ), ]) def test__make_table_stub(self): from gcloud._testing import _Monkey from gcloud.bigtable import client as MUT - from gcloud.bigtable.client import TABLE_ADMIN_HOST_V1 - from gcloud.bigtable.client import TABLE_ADMIN_PORT_V1 - from gcloud.bigtable.client import TABLE_STUB_FACTORY_V1 + from gcloud.bigtable.client 
import TABLE_ADMIN_HOST_V2 + from gcloud.bigtable.client import TABLE_ADMIN_PORT_V2 + from gcloud.bigtable.client import TABLE_STUB_FACTORY_V2 credentials = _Credentials() project = 'PROJECT' @@ -404,9 +404,9 @@ def mock_make_stub(*args): self.assertEqual(make_stub_args, [ ( client, - TABLE_STUB_FACTORY_V1, - TABLE_ADMIN_HOST_V1, - TABLE_ADMIN_PORT_V1, + TABLE_STUB_FACTORY_V2, + TABLE_ADMIN_HOST_V2, + TABLE_ADMIN_PORT_V2, ), ]) @@ -521,144 +521,21 @@ def test_stop_while_stopped(self): # Make sure the cluster stub did not change. self.assertEqual(client._cluster_stub_internal, cluster_stub) - def test_cluster_factory(self): - from gcloud.bigtable.cluster import Cluster + def test_instance_factory(self): + from gcloud.bigtable.instance import Instance - credentials = _Credentials() - project = 'PROJECT' - client = self._makeOne(project=project, credentials=credentials) - - zone = 'zone' - cluster_id = 'cluster-id' - display_name = 'display-name' - serve_nodes = 42 - cluster = client.cluster(zone, cluster_id, display_name=display_name, - serve_nodes=serve_nodes) - self.assertTrue(isinstance(cluster, Cluster)) - self.assertEqual(cluster.zone, zone) - self.assertEqual(cluster.cluster_id, cluster_id) - self.assertEqual(cluster.display_name, display_name) - self.assertEqual(cluster.serve_nodes, serve_nodes) - self.assertTrue(cluster._client is client) - - def _list_zones_helper(self, zone_status): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_v1_pb2) - from gcloud.bigtable._testing import _FakeStub + PROJECT = 'PROJECT' + INSTANCE_ID = 'instance-id' + DISPLAY_NAME = 'display-name' credentials = _Credentials() - project = 'PROJECT' - timeout_seconds = 281330 - client = self._makeOne(project=project, credentials=credentials, - admin=True, timeout_seconds=timeout_seconds) - - # Create request_pb - request_pb = 
messages_v1_pb2.ListZonesRequest( - name='projects/' + project, - ) - - # Create response_pb - zone1 = 'foo' - zone2 = 'bar' - response_pb = messages_v1_pb2.ListZonesResponse( - zones=[ - data_v1_pb2.Zone(display_name=zone1, status=zone_status), - data_v1_pb2.Zone(display_name=zone2, status=zone_status), - ], - ) - - # Patch the stub used by the API method. - client._cluster_stub_internal = stub = _FakeStub(response_pb) - - # Create expected_result. - expected_result = [zone1, zone2] - - # Perform the method and check the result. - result = client.list_zones() - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'ListZones', - (request_pb, timeout_seconds), - {}, - )]) - - def test_list_zones(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) - self._list_zones_helper(data_v1_pb2.Zone.OK) - - def test_list_zones_failure(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) - with self.assertRaises(ValueError): - self._list_zones_helper(data_v1_pb2.Zone.EMERGENCY_MAINENANCE) - - def test_list_clusters(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_v1_pb2) - from gcloud.bigtable._testing import _FakeStub + client = self._makeOne(project=PROJECT, credentials=credentials) - credentials = _Credentials() - project = 'PROJECT' - timeout_seconds = 8004 - client = self._makeOne(project=project, credentials=credentials, - admin=True, timeout_seconds=timeout_seconds) - - # Create request_pb - request_pb = messages_v1_pb2.ListClustersRequest( - name='projects/' + project, - ) - - # Create response_pb - zone = 'foo' - failed_zone = 'bar' - cluster_id1 = 'cluster-id1' - cluster_id2 = 'cluster-id2' - cluster_name1 = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id1) - cluster_name2 = ('projects/' + 
project + '/zones/' + zone + - '/clusters/' + cluster_id2) - response_pb = messages_v1_pb2.ListClustersResponse( - failed_zones=[ - data_v1_pb2.Zone(display_name=failed_zone), - ], - clusters=[ - data_v1_pb2.Cluster( - name=cluster_name1, - display_name=cluster_name1, - serve_nodes=3, - ), - data_v1_pb2.Cluster( - name=cluster_name2, - display_name=cluster_name2, - serve_nodes=3, - ), - ], - ) - - # Patch the stub used by the API method. - client._cluster_stub_internal = stub = _FakeStub(response_pb) - - # Create expected_result. - failed_zones = [failed_zone] - clusters = [ - client.cluster(zone, cluster_id1), - client.cluster(zone, cluster_id2), - ] - expected_result = (clusters, failed_zones) - - # Perform the method and check the result. - result = client.list_clusters() - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'ListClusters', - (request_pb, timeout_seconds), - {}, - )]) + instance = client.instance(INSTANCE_ID, display_name=DISPLAY_NAME) + self.assertTrue(isinstance(instance, Instance)) + self.assertEqual(instance.instance_id, INSTANCE_ID) + self.assertEqual(instance.display_name, DISPLAY_NAME) + self.assertTrue(instance._client is client) class Test_MetadataPlugin(unittest2.TestCase): diff --git a/gcloud/bigtable/test_cluster.py b/gcloud/bigtable/test_cluster.py index 294f9a0d0f55..6ddca98bf92e 100644 --- a/gcloud/bigtable/test_cluster.py +++ b/gcloud/bigtable/test_cluster.py @@ -26,15 +26,12 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def _constructor_test_helper(self, cluster=None): - import datetime op_type = 'fake-op' op_id = 8915 - begin = datetime.datetime(2015, 10, 22, 1, 1) - operation = self._makeOne(op_type, op_id, begin, cluster=cluster) + operation = self._makeOne(op_type, op_id, cluster=cluster) self.assertEqual(operation.op_type, op_type) self.assertEqual(operation.op_id, op_id) - self.assertEqual(operation.begin, begin) self.assertEqual(operation._cluster, 
cluster) self.assertFalse(operation._complete) @@ -46,13 +43,11 @@ def test_constructor_explicit_cluster(self): self._constructor_test_helper(cluster=cluster) def test___eq__(self): - import datetime op_type = 'fake-op' op_id = 8915 - begin = datetime.datetime(2015, 10, 22, 1, 1) cluster = object() - operation1 = self._makeOne(op_type, op_id, begin, cluster=cluster) - operation2 = self._makeOne(op_type, op_id, begin, cluster=cluster) + operation1 = self._makeOne(op_type, op_id, cluster=cluster) + operation2 = self._makeOne(op_type, op_id, cluster=cluster) self.assertEqual(operation1, operation2) def test___eq__type_differ(self): @@ -61,13 +56,11 @@ def test___eq__type_differ(self): self.assertNotEqual(operation1, operation2) def test___ne__same_value(self): - import datetime op_type = 'fake-op' op_id = 8915 - begin = datetime.datetime(2015, 10, 22, 1, 1) cluster = object() - operation1 = self._makeOne(op_type, op_id, begin, cluster=cluster) - operation2 = self._makeOne(op_type, op_id, begin, cluster=cluster) + operation1 = self._makeOne(op_type, op_id, cluster=cluster) + operation2 = self._makeOne(op_type, op_id, cluster=cluster) comparison_val = (operation1 != operation2) self.assertFalse(comparison_val) @@ -83,27 +76,27 @@ def test_finished_without_operation(self): operation.finished() def _finished_helper(self, done): - import datetime from google.longrunning import operations_pb2 from gcloud.bigtable._testing import _FakeStub from gcloud.bigtable.cluster import Cluster - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - op_type = 'fake-op' - op_id = 789 - begin = datetime.datetime(2015, 10, 22, 1, 1) + PROJECT = 'PROJECT' + INSTANCE_ID = 'instance-id' + CLUSTER_ID = 'cluster-id' + OP_TYPE = 'fake-op' + OP_ID = 789 timeout_seconds = 1 - client = _Client(project, timeout_seconds=timeout_seconds) - cluster = Cluster(zone, cluster_id, client) - operation = self._makeOne(op_type, op_id, begin, cluster=cluster) + client = _Client(PROJECT, 
timeout_seconds=timeout_seconds) + instance = _Instance(INSTANCE_ID, client) + cluster = Cluster(CLUSTER_ID, instance) + operation = self._makeOne(OP_TYPE, OP_ID, cluster=cluster) # Create request_pb - op_name = ('operations/projects/' + project + '/zones/' + - zone + '/clusters/' + cluster_id + - '/operations/%d' % (op_id,)) + op_name = ('operations/projects/' + PROJECT + + '/instances/' + INSTANCE_ID + + '/clusters/' + CLUSTER_ID + + '/operations/%d' % (OP_ID,)) request_pb = operations_pb2.GetOperationRequest(name=op_name) # Create response_pb @@ -139,6 +132,14 @@ def test_finished_not_done(self): class TestCluster(unittest2.TestCase): + PROJECT = 'project' + INSTANCE_ID = 'instance-id' + CLUSTER_ID = 'cluster-id' + CLUSTER_NAME = ('projects/' + PROJECT + + '/instances/' + INSTANCE_ID + + '/clusters/' + CLUSTER_ID) + TIMEOUT_SECONDS = 123 + def _getTargetClass(self): from gcloud.bigtable.cluster import Cluster return Cluster @@ -147,233 +148,176 @@ def _makeOne(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_constructor_defaults(self): - zone = 'zone' - cluster_id = 'cluster-id' - client = object() + from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) - cluster = self._makeOne(zone, cluster_id, client) - self.assertEqual(cluster.zone, zone) - self.assertEqual(cluster.cluster_id, cluster_id) - self.assertEqual(cluster.display_name, cluster_id) - self.assertEqual(cluster.serve_nodes, 3) - self.assertTrue(cluster._client is client) + cluster = self._makeOne(self.CLUSTER_ID, instance) + self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) + self.assertTrue(cluster._instance is instance) + self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) def test_constructor_non_default(self): - zone = 'zone' - cluster_id = 'cluster-id' - display_name = 'display_name' - serve_nodes = 8 - client = object() - - cluster = self._makeOne(zone, cluster_id, client, 
- display_name=display_name, - serve_nodes=serve_nodes) - self.assertEqual(cluster.zone, zone) - self.assertEqual(cluster.cluster_id, cluster_id) - self.assertEqual(cluster.display_name, display_name) - self.assertEqual(cluster.serve_nodes, serve_nodes) - self.assertTrue(cluster._client is client) + SERVE_NODES = 8 + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + + cluster = self._makeOne(self.CLUSTER_ID, instance, + serve_nodes=SERVE_NODES) + self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) + self.assertTrue(cluster._instance is instance) + self.assertEqual(cluster.serve_nodes, SERVE_NODES) def test_copy(self): - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - display_name = 'display_name' - serve_nodes = 8 - - client = _Client(project) - cluster = self._makeOne(zone, cluster_id, client, - display_name=display_name, - serve_nodes=serve_nodes) + SERVE_NODES = 8 + + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + cluster = self._makeOne(self.CLUSTER_ID, instance, + serve_nodes=SERVE_NODES) new_cluster = cluster.copy() # Make sure the client copy succeeded. - self.assertFalse(new_cluster._client is client) - self.assertEqual(new_cluster._client, client) + self.assertFalse(new_cluster._instance is instance) + self.assertEqual(new_cluster.serve_nodes, SERVE_NODES) # Make sure the client got copied to a new instance. 
self.assertFalse(cluster is new_cluster) self.assertEqual(cluster, new_cluster) - def test_table_factory(self): - from gcloud.bigtable.table import Table - - zone = 'zone' - cluster_id = 'cluster-id' - cluster = self._makeOne(zone, cluster_id, None) - - table_id = 'table_id' - table = cluster.table(table_id) - self.assertTrue(isinstance(table, Table)) - self.assertEqual(table.table_id, table_id) - self.assertEqual(table._cluster, cluster) - def test__update_from_pb_success(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES - display_name = 'display_name' - serve_nodes = 8 - cluster_pb = data_v1_pb2.Cluster( - display_name=display_name, - serve_nodes=serve_nodes, + SERVE_NODES = 8 + cluster_pb = _ClusterPB( + serve_nodes=SERVE_NODES, ) + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) - cluster = self._makeOne(None, None, None) - self.assertEqual(cluster.display_name, None) + cluster = self._makeOne(self.CLUSTER_ID, instance) self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) cluster._update_from_pb(cluster_pb) - self.assertEqual(cluster.display_name, display_name) - self.assertEqual(cluster.serve_nodes, serve_nodes) - - def test__update_from_pb_no_display_name(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) - from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES - - cluster_pb = data_v1_pb2.Cluster(serve_nodes=331) - cluster = self._makeOne(None, None, None) - self.assertEqual(cluster.display_name, None) - self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) - with self.assertRaises(ValueError): - cluster._update_from_pb(cluster_pb) - self.assertEqual(cluster.display_name, None) - self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) + self.assertEqual(cluster.serve_nodes, SERVE_NODES) def test__update_from_pb_no_serve_nodes(self): - from gcloud.bigtable._generated 
import ( - bigtable_cluster_data_pb2 as data_v1_pb2) from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES - cluster_pb = data_v1_pb2.Cluster(display_name='name') - cluster = self._makeOne(None, None, None) - self.assertEqual(cluster.display_name, None) + cluster_pb = _ClusterPB() + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + + cluster = self._makeOne(self.CLUSTER_ID, instance) self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) with self.assertRaises(ValueError): cluster._update_from_pb(cluster_pb) - self.assertEqual(cluster.display_name, None) self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) def test_from_pb_success(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) - - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - client = _Client(project=project) - - cluster_name = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id) - cluster_pb = data_v1_pb2.Cluster( - name=cluster_name, - display_name=cluster_id, - serve_nodes=331, + SERVE_NODES = 331 + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + + cluster_pb = _ClusterPB( + name=self.CLUSTER_NAME, + serve_nodes=SERVE_NODES, ) klass = self._getTargetClass() - cluster = klass.from_pb(cluster_pb, client) + cluster = klass.from_pb(cluster_pb, instance) self.assertTrue(isinstance(cluster, klass)) - self.assertEqual(cluster._client, client) - self.assertEqual(cluster.zone, zone) - self.assertEqual(cluster.cluster_id, cluster_id) + self.assertTrue(cluster._instance is instance) + self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) + self.assertEqual(cluster.serve_nodes, SERVE_NODES) def test_from_pb_bad_cluster_name(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) - - cluster_name = 'INCORRECT_FORMAT' - cluster_pb = data_v1_pb2.Cluster(name=cluster_name) + BAD_CLUSTER_NAME = 'INCORRECT_FORMAT' + client = 
_Client(self.PROJECT)
+        instance = _Instance(self.INSTANCE_ID, client)
+        cluster_pb = _ClusterPB(name=BAD_CLUSTER_NAME)

         klass = self._getTargetClass()
         with self.assertRaises(ValueError):
-            klass.from_pb(cluster_pb, None)
+            klass.from_pb(cluster_pb, instance)

     def test_from_pb_project_mistmatch(self):
-        from gcloud.bigtable._generated import (
-            bigtable_cluster_data_pb2 as data_v1_pb2)
+        ALT_PROJECT = 'ALT_PROJECT'
+        client = _Client(ALT_PROJECT)
+        instance = _Instance(self.INSTANCE_ID, client)
+
+        self.assertNotEqual(self.PROJECT, ALT_PROJECT)
+
+        cluster_pb = _ClusterPB(name=self.CLUSTER_NAME)

-        project = 'PROJECT'
-        zone = 'zone'
-        cluster_id = 'cluster-id'
-        alt_project = 'ALT_PROJECT'
-        client = _Client(project=alt_project)
+        klass = self._getTargetClass()
+        with self.assertRaises(ValueError):
+            klass.from_pb(cluster_pb, instance)

-        self.assertNotEqual(project, alt_project)
+    def test_from_pb_instance_mismatch(self):
+        ALT_INSTANCE_ID = 'ALT_INSTANCE_ID'
+        client = _Client(self.PROJECT)
+        instance = _Instance(ALT_INSTANCE_ID, client)

-        cluster_name = ('projects/' + project + '/zones/' + zone +
-                        '/clusters/' + cluster_id)
-        cluster_pb = data_v1_pb2.Cluster(name=cluster_name)
+        self.assertNotEqual(self.INSTANCE_ID, ALT_INSTANCE_ID)
+
+        cluster_pb = _ClusterPB(name=self.CLUSTER_NAME)

         klass = self._getTargetClass()
         with self.assertRaises(ValueError):
-            klass.from_pb(cluster_pb, client)
+            klass.from_pb(cluster_pb, instance)

     def test_name_property(self):
-        project = 'PROJECT'
-        zone = 'zone'
-        cluster_id = 'cluster-id'
-        client = _Client(project=project)
+        client = _Client(self.PROJECT)
+        instance = _Instance(self.INSTANCE_ID, client)

-        cluster = self._makeOne(zone, cluster_id, client)
-        cluster_name = ('projects/' + project + '/zones/' + zone +
-                        '/clusters/' + cluster_id)
-        self.assertEqual(cluster.name, cluster_name)
+        cluster = self._makeOne(self.CLUSTER_ID, instance)
+        self.assertEqual(cluster.name, self.CLUSTER_NAME)

     def test___eq__(self):
-        zone = 'zone'
-
cluster_id = 'cluster_id' - client = object() - cluster1 = self._makeOne(zone, cluster_id, client) - cluster2 = self._makeOne(zone, cluster_id, client) + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + cluster1 = self._makeOne(self.CLUSTER_ID, instance) + cluster2 = self._makeOne(self.CLUSTER_ID, instance) self.assertEqual(cluster1, cluster2) def test___eq__type_differ(self): - cluster1 = self._makeOne('zone', 'cluster_id', 'client') + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + cluster1 = self._makeOne(self.CLUSTER_ID, instance) cluster2 = object() self.assertNotEqual(cluster1, cluster2) def test___ne__same_value(self): - zone = 'zone' - cluster_id = 'cluster_id' - client = object() - cluster1 = self._makeOne(zone, cluster_id, client) - cluster2 = self._makeOne(zone, cluster_id, client) + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + cluster1 = self._makeOne(self.CLUSTER_ID, instance) + cluster2 = self._makeOne(self.CLUSTER_ID, instance) comparison_val = (cluster1 != cluster2) self.assertFalse(comparison_val) def test___ne__(self): - cluster1 = self._makeOne('zone1', 'cluster_id1', 'client1') - cluster2 = self._makeOne('zone2', 'cluster_id2', 'client2') + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + cluster1 = self._makeOne('cluster_id1', instance) + cluster2 = self._makeOne('cluster_id2', instance) self.assertNotEqual(cluster1, cluster2) def test_reload(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable._testing import _FakeStub from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - timeout_seconds = 123 - - client = _Client(project, timeout_seconds=timeout_seconds) - cluster = 
self._makeOne(zone, cluster_id, client) + SERVE_NODES = 31 + LOCATION = 'LOCATION' + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_ID, client) + cluster = self._makeOne(self.CLUSTER_ID, instance) # Create request_pb - cluster_name = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id) - request_pb = messages_v1_pb2.GetClusterRequest(name=cluster_name) + request_pb = _GetClusterRequestPB(name=self.CLUSTER_NAME) # Create response_pb - serve_nodes = 31 - display_name = u'hey-hi-hello' - response_pb = data_v1_pb2.Cluster( - display_name=display_name, - serve_nodes=serve_nodes, + response_pb = _ClusterPB( + serve_nodes=SERVE_NODES, + location=LOCATION, ) # Patch the stub used by the API method. @@ -384,55 +328,46 @@ def test_reload(self): # Check Cluster optional config values before. self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) - self.assertEqual(cluster.display_name, cluster_id) # Perform the method and check the result. result = cluster.reload() self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'GetCluster', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) # Check Cluster optional config values before. 
- self.assertEqual(cluster.serve_nodes, serve_nodes) - self.assertEqual(cluster.display_name, display_name) + self.assertEqual(cluster.serve_nodes, SERVE_NODES) + self.assertEqual(cluster.location, LOCATION) def test_create(self): from google.longrunning import operations_pb2 from gcloud._testing import _Monkey - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) from gcloud.bigtable._testing import _FakeStub from gcloud.bigtable import cluster as MUT - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - timeout_seconds = 578 - - client = _Client(project, timeout_seconds=timeout_seconds) - cluster = self._makeOne(zone, cluster_id, client) + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_ID, client) + cluster = self._makeOne(self.CLUSTER_ID, instance) # Create request_pb. Just a mock since we monkey patch # _prepare_create_request request_pb = object() # Create response_pb - op_id = 5678 - op_begin = object() - op_name = ('operations/projects/%s/zones/%s/clusters/%s/' - 'operations/%d' % (project, zone, cluster_id, op_id)) - current_op = operations_pb2.Operation(name=op_name) - response_pb = data_v1_pb2.Cluster(current_operation=current_op) + OP_ID = 5678 + OP_NAME = ( + 'operations/projects/%s/instances/%s/clusters/%s/operations/%d' % + (self.PROJECT, self.INSTANCE_ID, self.CLUSTER_ID, OP_ID)) + response_pb = operations_pb2.Operation(name=OP_NAME) # Patch the stub used by the API method. client._cluster_stub = stub = _FakeStub(response_pb) # Create expected_result. - expected_result = MUT.Operation('create', op_id, op_begin, - cluster=cluster) + expected_result = MUT.Operation('create', OP_ID, cluster=cluster) # Create the mocks. 
prep_create_called = [] @@ -445,7 +380,7 @@ def mock_prep_create_req(cluster): def mock_process_operation(operation_pb): process_operation_called.append(operation_pb) - return op_id, op_begin + return OP_ID # Perform the method and check the result. with _Monkey(MUT, _prepare_create_request=mock_prep_create_req, @@ -455,60 +390,47 @@ def mock_process_operation(operation_pb): self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'CreateCluster', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) self.assertEqual(prep_create_called, [cluster]) - self.assertEqual(process_operation_called, [current_op]) + self.assertEqual(process_operation_called, [response_pb]) def test_update(self): from google.longrunning import operations_pb2 from gcloud._testing import _Monkey - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) from gcloud.bigtable._testing import _FakeStub from gcloud.bigtable import cluster as MUT - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - serve_nodes = 81 - display_name = 'display_name' - timeout_seconds = 9 + SERVE_NODES = 81 - client = _Client(project, timeout_seconds=timeout_seconds) - cluster = self._makeOne(zone, cluster_id, client, - display_name=display_name, - serve_nodes=serve_nodes) + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_ID, client) + cluster = self._makeOne(self.CLUSTER_ID, instance, + serve_nodes=SERVE_NODES) # Create request_pb - cluster_name = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id) - request_pb = data_v1_pb2.Cluster( - name=cluster_name, - display_name=display_name, - serve_nodes=serve_nodes, + request_pb = _ClusterPB( + name=self.CLUSTER_NAME, + serve_nodes=SERVE_NODES, ) # Create response_pb - current_op = operations_pb2.Operation() - response_pb = data_v1_pb2.Cluster(current_operation=current_op) + response_pb = 
operations_pb2.Operation() # Patch the stub used by the API method. client._cluster_stub = stub = _FakeStub(response_pb) # Create expected_result. - op_id = 5678 - op_begin = object() - expected_result = MUT.Operation('update', op_id, op_begin, - cluster=cluster) + OP_ID = 5678 + expected_result = MUT.Operation('update', OP_ID, cluster=cluster) # Create mocks process_operation_called = [] def mock_process_operation(operation_pb): process_operation_called.append(operation_pb) - return op_id, op_begin + return OP_ID # Perform the method and check the result. with _Monkey(MUT, _process_operation=mock_process_operation): @@ -517,29 +439,21 @@ def mock_process_operation(operation_pb): self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'UpdateCluster', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) - self.assertEqual(process_operation_called, [current_op]) + self.assertEqual(process_operation_called, [response_pb]) def test_delete(self): from google.protobuf import empty_pb2 - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable._testing import _FakeStub - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - timeout_seconds = 57 - - client = _Client(project, timeout_seconds=timeout_seconds) - cluster = self._makeOne(zone, cluster_id, client) + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_ID, client) + cluster = self._makeOne(self.CLUSTER_ID, instance) # Create request_pb - cluster_name = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id) - request_pb = messages_v1_pb2.DeleteClusterRequest(name=cluster_name) + request_pb = _DeleteClusterRequestPB(name=self.CLUSTER_NAME) # Create response_pb response_pb = empty_pb2.Empty() @@ -556,128 +470,10 @@ def test_delete(self): self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 
'DeleteCluster', - (request_pb, timeout_seconds), - {}, - )]) - - def test_undelete(self): - from google.longrunning import operations_pb2 - from gcloud._testing import _Monkey - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_v1_pb2) - from gcloud.bigtable._testing import _FakeStub - from gcloud.bigtable import cluster as MUT - - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - timeout_seconds = 78 - - client = _Client(project, timeout_seconds=timeout_seconds) - cluster = self._makeOne(zone, cluster_id, client) - - # Create request_pb - cluster_name = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id) - request_pb = messages_v1_pb2.UndeleteClusterRequest(name=cluster_name) - - # Create response_pb - response_pb = operations_pb2.Operation() - - # Patch the stub used by the API method. - client._cluster_stub = stub = _FakeStub(response_pb) - - # Create expected_result. - op_id = 5678 - op_begin = object() - expected_result = MUT.Operation('undelete', op_id, op_begin, - cluster=cluster) - - # Create the mocks. - process_operation_called = [] - - def mock_process_operation(operation_pb): - process_operation_called.append(operation_pb) - return op_id, op_begin - - # Perform the method and check the result. 
- with _Monkey(MUT, _process_operation=mock_process_operation): - result = cluster.undelete() - - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'UndeleteCluster', - (request_pb, timeout_seconds), - {}, - )]) - self.assertEqual(process_operation_called, [response_pb]) - - def _list_tables_helper(self, table_id, table_name=None): - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as table_data_pb2) - from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as table_messages_v1_pb2) - from gcloud.bigtable._testing import _FakeStub - - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - timeout_seconds = 45 - - client = _Client(project, timeout_seconds=timeout_seconds) - cluster = self._makeOne(zone, cluster_id, client) - - # Create request_ - cluster_name = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id) - request_pb = table_messages_v1_pb2.ListTablesRequest( - name=cluster_name) - - # Create response_pb - table_name = table_name or (cluster_name + '/tables/' + table_id) - response_pb = table_messages_v1_pb2.ListTablesResponse( - tables=[ - table_data_pb2.Table(name=table_name), - ], - ) - - # Patch the stub used by the API method. - client._table_stub = stub = _FakeStub(response_pb) - - # Create expected_result. - expected_table = cluster.table(table_id) - expected_result = [expected_table] - - # Perform the method and check the result. 
- result = cluster.list_tables() - - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'ListTables', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) - def test_list_tables(self): - table_id = 'table_id' - self._list_tables_helper(table_id) - - def test_list_tables_failure_bad_split(self): - with self.assertRaises(ValueError): - self._list_tables_helper(None, table_name='wrong-format') - - def test_list_tables_failure_name_bad_before(self): - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - - table_id = 'table_id' - bad_table_name = ('nonempty-section-before' + - 'projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id + '/tables/' + table_id) - with self.assertRaises(ValueError): - self._list_tables_helper(table_id, table_name=bad_table_name) - class Test__prepare_create_request(unittest2.TestCase): @@ -686,30 +482,23 @@ def _callFUT(self, cluster): return _prepare_create_request(cluster) def test_it(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_v1_pb2) from gcloud.bigtable.cluster import Cluster - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - display_name = u'DISPLAY_NAME' - serve_nodes = 8 - client = _Client(project) + PROJECT = 'PROJECT' + INSTANCE_ID = 'instance-id' + CLUSTER_ID = 'cluster-id' + SERVE_NODES = 8 + + client = _Client(PROJECT) + instance = _Instance(INSTANCE_ID, client) + cluster = Cluster(CLUSTER_ID, instance, + serve_nodes=SERVE_NODES) - cluster = Cluster(zone, cluster_id, client, - display_name=display_name, serve_nodes=serve_nodes) request_pb = self._callFUT(cluster) - self.assertTrue(isinstance(request_pb, - messages_v1_pb2.CreateClusterRequest)) - self.assertEqual(request_pb.cluster_id, cluster_id) - self.assertEqual(request_pb.name, - 'projects/' + project + '/zones/' + zone) - 
self.assertTrue(isinstance(request_pb.cluster, data_v1_pb2.Cluster)) - self.assertEqual(request_pb.cluster.display_name, display_name) - self.assertEqual(request_pb.cluster.serve_nodes, serve_nodes) + + self.assertEqual(request_pb.cluster_id, CLUSTER_ID) + self.assertEqual(request_pb.name, instance.name) + self.assertEqual(request_pb.cluster.serve_nodes, SERVE_NODES) class Test__parse_pb_any_to_native(unittest2.TestCase): @@ -721,17 +510,16 @@ def _callFUT(self, any_val, expected_type=None): def test_with_known_type_url(self): from google.protobuf import any_pb2 from gcloud._testing import _Monkey - from gcloud.bigtable._generated import ( - bigtable_data_pb2 as data_v1_pb2) from gcloud.bigtable import cluster as MUT - type_url = 'type.googleapis.com/' + data_v1_pb2._CELL.full_name - fake_type_url_map = {type_url: data_v1_pb2.Cell} - - cell = data_v1_pb2.Cell( + cell = _CellPB( timestamp_micros=0, value=b'foobar', ) + + type_url = 'type.googleapis.com/' + cell.DESCRIPTOR.full_name + fake_type_url_map = {type_url: cell.__class__} + any_val = any_pb2.Any( type_url=type_url, value=cell.SerializeToString(), @@ -741,83 +529,6 @@ def test_with_known_type_url(self): self.assertEqual(result, cell) - def test_with_create_cluster_metadata(self): - from google.protobuf import any_pb2 - from google.protobuf.timestamp_pb2 import Timestamp - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_v1_pb2) - - type_url = ('type.googleapis.com/' + - messages_v1_pb2._CREATECLUSTERMETADATA.full_name) - metadata = messages_v1_pb2.CreateClusterMetadata( - request_time=Timestamp(seconds=1, nanos=1234), - finish_time=Timestamp(seconds=10, nanos=891011), - original_request=messages_v1_pb2.CreateClusterRequest( - name='foo', - cluster_id='bar', - cluster=data_v1_pb2.Cluster( - display_name='quux', - serve_nodes=1337, - ), - ), - ) - - any_val = any_pb2.Any( - 
type_url=type_url, - value=metadata.SerializeToString(), - ) - result = self._callFUT(any_val) - self.assertEqual(result, metadata) - - def test_with_update_cluster_metadata(self): - from google.protobuf import any_pb2 - from google.protobuf.timestamp_pb2 import Timestamp - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_v1_pb2) - - type_url = ('type.googleapis.com/' + - messages_v1_pb2._UPDATECLUSTERMETADATA.full_name) - metadata = messages_v1_pb2.UpdateClusterMetadata( - request_time=Timestamp(seconds=1, nanos=1234), - finish_time=Timestamp(seconds=10, nanos=891011), - cancel_time=Timestamp(seconds=100, nanos=76543), - original_request=data_v1_pb2.Cluster( - display_name='the-end', - serve_nodes=42, - ), - ) - - any_val = any_pb2.Any( - type_url=type_url, - value=metadata.SerializeToString(), - ) - result = self._callFUT(any_val) - self.assertEqual(result, metadata) - - def test_with_undelete_cluster_metadata(self): - from google.protobuf import any_pb2 - from google.protobuf.timestamp_pb2 import Timestamp - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_v1_pb2) - - type_url = ('type.googleapis.com/' + - messages_v1_pb2._UNDELETECLUSTERMETADATA.full_name) - metadata = messages_v1_pb2.UndeleteClusterMetadata( - request_time=Timestamp(seconds=1, nanos=1234), - finish_time=Timestamp(seconds=10, nanos=891011), - ) - - any_val = any_pb2.Any( - type_url=type_url, - value=metadata.SerializeToString(), - ) - result = self._callFUT(any_val) - self.assertEqual(result, metadata) - def test_unknown_type_url(self): from google.protobuf import any_pb2 from gcloud._testing import _Monkey @@ -851,58 +562,72 @@ def _callFUT(self, operation_pb): def test_it(self): from google.longrunning import operations_pb2 - from gcloud._testing import _Monkey - from gcloud.bigtable._generated import ( - 
bigtable_cluster_service_messages_pb2 as messages_v1_pb2) - from gcloud.bigtable import cluster as MUT - - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - expected_operation_id = 234 - operation_name = ('operations/projects/%s/zones/%s/clusters/%s/' - 'operations/%d' % (project, zone, cluster_id, - expected_operation_id)) - current_op = operations_pb2.Operation(name=operation_name) + PROJECT = 'project' + INSTANCE_ID = 'instance-id' + CLUSTER_ID = 'cluster-id' + EXPECTED_OPERATION_ID = 234 + OPERATION_NAME = ( + 'operations/projects/%s/instances/%s/clusters/%s/operations/%d' % + (PROJECT, INSTANCE_ID, CLUSTER_ID, EXPECTED_OPERATION_ID)) - # Create mocks. - request_metadata = messages_v1_pb2.CreateClusterMetadata() - parse_pb_any_called = [] - - def mock_parse_pb_any_to_native(any_val, expected_type=None): - parse_pb_any_called.append((any_val, expected_type)) - return request_metadata - - expected_operation_begin = object() - ts_to_dt_called = [] - - def mock_pb_timestamp_to_datetime(timestamp): - ts_to_dt_called.append(timestamp) - return expected_operation_begin + operation_pb = operations_pb2.Operation(name=OPERATION_NAME) # Exectute method with mocks in place. - with _Monkey(MUT, _parse_pb_any_to_native=mock_parse_pb_any_to_native, - _pb_timestamp_to_datetime=mock_pb_timestamp_to_datetime): - operation_id, operation_begin = self._callFUT(current_op) + operation_id = self._callFUT(operation_pb) # Check outputs. - self.assertEqual(operation_id, expected_operation_id) - self.assertTrue(operation_begin is expected_operation_begin) - - # Check mocks were used correctly. 
- self.assertEqual(parse_pb_any_called, [(current_op.metadata, None)]) - self.assertEqual(ts_to_dt_called, [request_metadata.request_time]) + self.assertEqual(operation_id, EXPECTED_OPERATION_ID) def test_op_name_parsing_failure(self): from google.longrunning import operations_pb2 - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_v1_pb2) - current_op = operations_pb2.Operation(name='invalid') - cluster = data_v1_pb2.Cluster(current_operation=current_op) + operation_pb = operations_pb2.Operation(name='invalid') with self.assertRaises(ValueError): - self._callFUT(cluster) + self._callFUT(operation_pb) + + +def _CellPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Cell(*args, **kw) + + +def _ClusterPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as instance_v2_pb2) + return instance_v2_pb2.Cluster(*args, **kw) + + +def _DeleteClusterRequestPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + return messages_v2_pb2.DeleteClusterRequest(*args, **kw) + + +def _GetClusterRequestPB(*args, **kw): + from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + return messages_v2_pb2.GetClusterRequest(*args, **kw) + + +class _Instance(object): + + def __init__(self, instance_id, client): + self.instance_id = instance_id + self._client = client + + @property + def name(self): + return 'projects/%s/instances/%s' % ( + self._client.project, self.instance_id) + + def copy(self): + return self.__class__(self.instance_id, self._client) + + def __eq__(self, other): + return (other.instance_id == self.instance_id and + other._client == self._client) class _Client(object): @@ -912,10 +637,6 @@ def __init__(self, project, timeout_seconds=None): self.project_name = 'projects/' + self.project self.timeout_seconds = timeout_seconds - def copy(self): - from copy import 
deepcopy - return deepcopy(self) - def __eq__(self, other): return (other.project == self.project and other.project_name == self.project_name and diff --git a/gcloud/bigtable/test_instance.py b/gcloud/bigtable/test_instance.py new file mode 100644 index 000000000000..fbe2f384cfb2 --- /dev/null +++ b/gcloud/bigtable/test_instance.py @@ -0,0 +1,752 @@ +# Copyright 2015 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import datetime +import unittest2 + + +class TestOperation(unittest2.TestCase): + + OP_TYPE = 'fake-op' + OP_ID = 8915 + BEGIN = datetime.datetime(2015, 10, 22, 1, 1) + + def _getTargetClass(self): + from gcloud.bigtable.instance import Operation + return Operation + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def _constructor_test_helper(self, instance=None): + operation = self._makeOne( + self.OP_TYPE, self.OP_ID, self.BEGIN, instance=instance) + + self.assertEqual(operation.op_type, self.OP_TYPE) + self.assertEqual(operation.op_id, self.OP_ID) + self.assertEqual(operation.begin, self.BEGIN) + self.assertEqual(operation._instance, instance) + self.assertFalse(operation._complete) + + def test_constructor_defaults(self): + self._constructor_test_helper() + + def test_constructor_explicit_instance(self): + instance = object() + self._constructor_test_helper(instance=instance) + + def test___eq__(self): + instance = object() + operation1 = self._makeOne( + self.OP_TYPE, 
self.OP_ID, self.BEGIN, instance=instance) + operation2 = self._makeOne( + self.OP_TYPE, self.OP_ID, self.BEGIN, instance=instance) + self.assertEqual(operation1, operation2) + + def test___eq__type_differ(self): + operation1 = self._makeOne('foo', 123, None) + operation2 = object() + self.assertNotEqual(operation1, operation2) + + def test___ne__same_value(self): + instance = object() + operation1 = self._makeOne( + self.OP_TYPE, self.OP_ID, self.BEGIN, instance=instance) + operation2 = self._makeOne( + self.OP_TYPE, self.OP_ID, self.BEGIN, instance=instance) + comparison_val = (operation1 != operation2) + self.assertFalse(comparison_val) + + def test___ne__(self): + operation1 = self._makeOne('foo', 123, None) + operation2 = self._makeOne('bar', 456, None) + self.assertNotEqual(operation1, operation2) + + def test_finished_without_operation(self): + operation = self._makeOne(None, None, None) + operation._complete = True + with self.assertRaises(ValueError): + operation.finished() + + def _finished_helper(self, done): + from google.longrunning import operations_pb2 + from gcloud.bigtable._testing import _FakeStub + from gcloud.bigtable.instance import Instance + + PROJECT = 'PROJECT' + INSTANCE_ID = 'instance-id' + TIMEOUT_SECONDS = 1 + + client = _Client(PROJECT, timeout_seconds=TIMEOUT_SECONDS) + instance = Instance(INSTANCE_ID, client) + operation = self._makeOne( + self.OP_TYPE, self.OP_ID, self.BEGIN, instance=instance) + + # Create request_pb + op_name = ('operations/projects/' + PROJECT + + '/instances/' + INSTANCE_ID + + '/operations/%d' % (self.OP_ID,)) + request_pb = operations_pb2.GetOperationRequest(name=op_name) + + # Create response_pb + response_pb = operations_pb2.Operation(done=done) + + # Patch the stub used by the API method. + client._operations_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = done + + # Perform the method and check the result. 
+ result = operation.finished() + + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'GetOperation', + (request_pb, TIMEOUT_SECONDS), + {}, + )]) + + if done: + self.assertTrue(operation._complete) + else: + self.assertFalse(operation._complete) + + def test_finished(self): + self._finished_helper(done=True) + + def test_finished_not_done(self): + self._finished_helper(done=False) + + +class TestInstance(unittest2.TestCase): + + PROJECT = 'project' + INSTANCE_ID = 'instance-id' + INSTANCE_NAME = ('projects/' + PROJECT + '/instances/' + INSTANCE_ID) + DISPLAY_NAME = 'display_name' + OP_ID = 8915 + OP_NAME = ('operations/projects/%s/instances/%soperations/%d' % + (PROJECT, INSTANCE_ID, OP_ID)) + TABLE_ID = 'table_id' + TABLE_NAME = INSTANCE_NAME + '/tables/' + TABLE_ID + TIMEOUT_SECONDS = 1 + + def _getTargetClass(self): + from gcloud.bigtable.instance import Instance + return Instance + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_constructor_defaults(self): + client = object() + + instance = self._makeOne(self.INSTANCE_ID, client) + self.assertEqual(instance.instance_id, self.INSTANCE_ID) + self.assertEqual(instance.display_name, self.INSTANCE_ID) + self.assertTrue(instance._client is client) + + def test_constructor_non_default(self): + display_name = 'display_name' + client = object() + + instance = self._makeOne(self.INSTANCE_ID, client, + display_name=display_name) + self.assertEqual(instance.instance_id, self.INSTANCE_ID) + self.assertEqual(instance.display_name, display_name) + self.assertTrue(instance._client is client) + + def test_copy(self): + display_name = 'display_name' + + client = _Client(self.PROJECT) + instance = self._makeOne(self.INSTANCE_ID, client, + display_name=display_name) + new_instance = instance.copy() + + # Make sure the client copy succeeded. 
+ self.assertFalse(new_instance._client is client) + self.assertEqual(new_instance._client, client) + # Make sure the client got copied to a new instance. + self.assertFalse(instance is new_instance) + self.assertEqual(instance, new_instance) + + def test_table_factory(self): + from gcloud.bigtable.table import Table + + instance = self._makeOne(self.INSTANCE_ID, None) + + table = instance.table(self.TABLE_ID) + self.assertTrue(isinstance(table, Table)) + self.assertEqual(table.table_id, self.TABLE_ID) + self.assertEqual(table._instance, instance) + + def test__update_from_pb_success(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + + display_name = 'display_name' + instance_pb = data_v2_pb2.Instance( + display_name=display_name, + ) + + instance = self._makeOne(None, None, None) + self.assertEqual(instance.display_name, None) + instance._update_from_pb(instance_pb) + self.assertEqual(instance.display_name, display_name) + + def test__update_from_pb_no_display_name(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + + instance_pb = data_v2_pb2.Instance() + instance = self._makeOne(None, None, None) + self.assertEqual(instance.display_name, None) + with self.assertRaises(ValueError): + instance._update_from_pb(instance_pb) + self.assertEqual(instance.display_name, None) + + def test_from_pb_success(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + + client = _Client(project=self.PROJECT) + + instance_pb = data_v2_pb2.Instance( + name=self.INSTANCE_NAME, + display_name=self.INSTANCE_ID, + ) + + klass = self._getTargetClass() + instance = klass.from_pb(instance_pb, client) + self.assertTrue(isinstance(instance, klass)) + self.assertEqual(instance._client, client) + self.assertEqual(instance.instance_id, self.INSTANCE_ID) + + def test_from_pb_bad_instance_name(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + + instance_name = 
'INCORRECT_FORMAT' + instance_pb = data_v2_pb2.Instance(name=instance_name) + + klass = self._getTargetClass() + with self.assertRaises(ValueError): + klass.from_pb(instance_pb, None) + + def test_from_pb_project_mistmatch(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + + ALT_PROJECT = 'ALT_PROJECT' + client = _Client(project=ALT_PROJECT) + + self.assertNotEqual(self.PROJECT, ALT_PROJECT) + + instance_pb = data_v2_pb2.Instance(name=self.INSTANCE_NAME) + + klass = self._getTargetClass() + with self.assertRaises(ValueError): + klass.from_pb(instance_pb, client) + + def test_name_property(self): + client = _Client(project=self.PROJECT) + + instance = self._makeOne(self.INSTANCE_ID, client) + self.assertEqual(instance.name, self.INSTANCE_NAME) + + def test___eq__(self): + client = object() + instance1 = self._makeOne(self.INSTANCE_ID, client) + instance2 = self._makeOne(self.INSTANCE_ID, client) + self.assertEqual(instance1, instance2) + + def test___eq__type_differ(self): + client = object() + instance1 = self._makeOne(self.INSTANCE_ID, client) + instance2 = object() + self.assertNotEqual(instance1, instance2) + + def test___ne__same_value(self): + client = object() + instance1 = self._makeOne(self.INSTANCE_ID, client) + instance2 = self._makeOne(self.INSTANCE_ID, client) + comparison_val = (instance1 != instance2) + self.assertFalse(comparison_val) + + def test___ne__(self): + instance1 = self._makeOne('instance_id1', 'client1') + instance2 = self._makeOne('instance_id2', 'client2') + self.assertNotEqual(instance1, instance2) + + def test_reload(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb) + from gcloud.bigtable._testing import _FakeStub + + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = self._makeOne(self.INSTANCE_ID, client) + + # Create request_pb + request_pb 
= messages_v2_pb.GetInstanceRequest( + name=self.INSTANCE_NAME) + + # Create response_pb + DISPLAY_NAME = u'hey-hi-hello' + response_pb = data_v2_pb2.Instance( + display_name=DISPLAY_NAME, + ) + + # Patch the stub used by the API method. + client._instance_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = None # reload() has no return value. + + # Check Instance optional config values before. + self.assertEqual(instance.display_name, self.INSTANCE_ID) + + # Perform the method and check the result. + result = instance.reload() + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'GetInstance', + (request_pb, self.TIMEOUT_SECONDS), + {}, + )]) + + # Check Instance optional config values before. + self.assertEqual(instance.display_name, DISPLAY_NAME) + + def test_create(self): + from google.longrunning import operations_pb2 + from gcloud._testing import _Monkey + from gcloud.bigtable._testing import _FakeStub + from gcloud.bigtable import instance as MUT + + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = self._makeOne(self.INSTANCE_ID, client) + + # Create request_pb. Just a mock since we monkey patch + # _prepare_create_request + request_pb = object() + + # Create response_pb + op_begin = object() + response_pb = operations_pb2.Operation(name=self.OP_NAME) + + # Patch the stub used by the API method. + client._instance_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = MUT.Operation('create', self.OP_ID, op_begin, + instance=instance) + + # Create the mocks. + prep_create_called = [] + + def mock_prep_create_req(instance): + prep_create_called.append(instance) + return request_pb + + process_operation_called = [] + + def mock_process_operation(operation_pb): + process_operation_called.append(operation_pb) + return self.OP_ID, op_begin + + # Perform the method and check the result. 
+ with _Monkey(MUT, + _prepare_create_request=mock_prep_create_req, + _process_operation=mock_process_operation): + result = instance.create() + + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'CreateInstance', + (request_pb, self.TIMEOUT_SECONDS), + {}, + )]) + self.assertEqual(prep_create_called, [instance]) + self.assertEqual(process_operation_called, [response_pb]) + + def test_update(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + from gcloud.bigtable._testing import _FakeStub + + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = self._makeOne(self.INSTANCE_ID, client, + display_name=self.DISPLAY_NAME) + + # Create request_pb + request_pb = data_v2_pb2.Instance( + name=self.INSTANCE_NAME, + display_name=self.DISPLAY_NAME, + ) + + # Create response_pb + response_pb = data_v2_pb2.Instance() + + # Patch the stub used by the API method. + client._instance_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = None + + # Perform the method and check the result. + result = instance.update() + + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'UpdateInstance', + (request_pb, self.TIMEOUT_SECONDS), + {}, + )]) + + def test_delete(self): + from google.protobuf import empty_pb2 + from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb) + from gcloud.bigtable._testing import _FakeStub + + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = self._makeOne(self.INSTANCE_ID, client) + + # Create request_pb + request_pb = messages_v2_pb.DeleteInstanceRequest( + name=self.INSTANCE_NAME) + + # Create response_pb + response_pb = empty_pb2.Empty() + + # Patch the stub used by the API method. + client._instance_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = None # delete() has no return value. 
+ + # Perform the method and check the result. + result = instance.delete() + + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'DeleteInstance', + (request_pb, self.TIMEOUT_SECONDS), + {}, + )]) + + def test_list_clusters(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as instance_v2_pb2) + from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + from gcloud.bigtable._testing import _FakeStub + + FAILED_LOCATION = 'FAILED' + FAILED_LOCATIONS = [FAILED_LOCATION] + CLUSTER_ID1 = 'cluster-id1' + CLUSTER_ID2 = 'cluster-id2' + SERVE_NODES = 4 + + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = self._makeOne(self.INSTANCE_ID, client) + + CLUSTER_NAME1 = (instance.name + '/clusters/' + CLUSTER_ID1) + CLUSTER_NAME2 = (instance.name + '/clusters/' + CLUSTER_ID2) + # Create request_pb + request_pb = messages_v2_pb2.ListClustersRequest( + name=instance.name, + ) + + # Create response_pb + response_pb = messages_v2_pb2.ListClustersResponse( + failed_locations=[FAILED_LOCATION], + clusters=[ + instance_v2_pb2.Cluster( + name=CLUSTER_NAME1, + serve_nodes=SERVE_NODES, + ), + instance_v2_pb2.Cluster( + name=CLUSTER_NAME2, + serve_nodes=SERVE_NODES, + ), + ], + ) + + # Patch the stub used by the API method. + client._instance_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + clusters = [ + instance.cluster(CLUSTER_ID1), + instance.cluster(CLUSTER_ID2), + ] + expected_result = (clusters, FAILED_LOCATIONS) + + # Perform the method and check the result. 
+ result = instance.list_clusters() + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'ListClusters', + (request_pb, self.TIMEOUT_SECONDS), + {}, + )]) + + def _list_tables_helper(self, table_name=None): + from gcloud.bigtable._generated_v2 import ( + table_pb2 as table_data_v2_pb2) + from gcloud.bigtable._generated_v2 import ( + bigtable_table_admin_pb2 as table_messages_v1_pb2) + from gcloud.bigtable._testing import _FakeStub + + client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS) + instance = self._makeOne(self.INSTANCE_ID, client) + + # Create request_ + request_pb = table_messages_v1_pb2.ListTablesRequest( + name=self.INSTANCE_NAME) + + # Create response_pb + if table_name is None: + table_name = self.TABLE_NAME + + response_pb = table_messages_v1_pb2.ListTablesResponse( + tables=[ + table_data_v2_pb2.Table(name=table_name), + ], + ) + + # Patch the stub used by the API method. + client._table_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_table = instance.table(self.TABLE_ID) + expected_result = [expected_table] + + # Perform the method and check the result. 
+ result = instance.list_tables() + + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'ListTables', + (request_pb, self.TIMEOUT_SECONDS), + {}, + )]) + + def test_list_tables(self): + self._list_tables_helper() + + def test_list_tables_failure_bad_split(self): + with self.assertRaises(ValueError): + self._list_tables_helper(table_name='wrong-format') + + def test_list_tables_failure_name_bad_before(self): + BAD_TABLE_NAME = ('nonempty-section-before' + + 'projects/' + self.PROJECT + + '/instances/' + self.INSTANCE_ID + + '/tables/' + self.TABLE_ID) + with self.assertRaises(ValueError): + self._list_tables_helper(table_name=BAD_TABLE_NAME) + + +class Test__prepare_create_request(unittest2.TestCase): + + def _callFUT(self, instance): + from gcloud.bigtable.instance import _prepare_create_request + return _prepare_create_request(instance) + + def test_it(self): + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb) + from gcloud.bigtable.instance import Instance + + PROJECT = 'PROJECT' + INSTANCE_ID = 'instance-id' + DISPLAY_NAME = u'DISPLAY_NAME' + client = _Client(PROJECT) + + instance = Instance(INSTANCE_ID, client, display_name=DISPLAY_NAME) + request_pb = self._callFUT(instance) + self.assertTrue(isinstance(request_pb, + messages_v2_pb.CreateInstanceRequest)) + self.assertEqual(request_pb.instance_id, INSTANCE_ID) + self.assertEqual(request_pb.name, + 'projects/' + PROJECT) + self.assertTrue(isinstance(request_pb.instance, data_v2_pb2.Instance)) + self.assertEqual(request_pb.instance.display_name, DISPLAY_NAME) + + +class Test__parse_pb_any_to_native(unittest2.TestCase): + + def _callFUT(self, any_val, expected_type=None): + from gcloud.bigtable.instance import _parse_pb_any_to_native + return _parse_pb_any_to_native(any_val, expected_type=expected_type) + + def test_with_known_type_url(self): + from 
google.protobuf import any_pb2 + from gcloud._testing import _Monkey + from gcloud.bigtable._generated_v2 import ( + data_pb2 as data_v2_pb2) + from gcloud.bigtable import instance as MUT + + TYPE_URL = 'type.googleapis.com/' + data_v2_pb2._CELL.full_name + fake_type_url_map = {TYPE_URL: data_v2_pb2.Cell} + + cell = data_v2_pb2.Cell( + timestamp_micros=0, + value=b'foobar', + ) + any_val = any_pb2.Any( + type_url=TYPE_URL, + value=cell.SerializeToString(), + ) + with _Monkey(MUT, _TYPE_URL_MAP=fake_type_url_map): + result = self._callFUT(any_val) + + self.assertEqual(result, cell) + + def test_with_create_instance_metadata(self): + from google.protobuf import any_pb2 + from google.protobuf.timestamp_pb2 import Timestamp + from gcloud.bigtable._generated_v2 import ( + instance_pb2 as data_v2_pb2) + from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb) + + TYPE_URL = ('type.googleapis.com/' + + messages_v2_pb._CREATEINSTANCEMETADATA.full_name) + metadata = messages_v2_pb.CreateInstanceMetadata( + request_time=Timestamp(seconds=1, nanos=1234), + finish_time=Timestamp(seconds=10, nanos=891011), + original_request=messages_v2_pb.CreateInstanceRequest( + name='foo', + instance_id='bar', + instance=data_v2_pb2.Instance( + display_name='quux', + ), + ), + ) + + any_val = any_pb2.Any( + type_url=TYPE_URL, + value=metadata.SerializeToString(), + ) + result = self._callFUT(any_val) + self.assertEqual(result, metadata) + + def test_unknown_type_url(self): + from google.protobuf import any_pb2 + from gcloud._testing import _Monkey + from gcloud.bigtable import instance as MUT + + fake_type_url_map = {} + any_val = any_pb2.Any() + with _Monkey(MUT, _TYPE_URL_MAP=fake_type_url_map): + with self.assertRaises(KeyError): + self._callFUT(any_val) + + def test_disagreeing_type_url(self): + from google.protobuf import any_pb2 + from gcloud._testing import _Monkey + from gcloud.bigtable import instance as MUT + + TYPE_URL1 = 'foo' + TYPE_URL2 = 
'bar' + fake_type_url_map = {TYPE_URL1: None} + any_val = any_pb2.Any(type_url=TYPE_URL2) + with _Monkey(MUT, _TYPE_URL_MAP=fake_type_url_map): + with self.assertRaises(ValueError): + self._callFUT(any_val, expected_type=TYPE_URL1) + + +class Test__process_operation(unittest2.TestCase): + + def _callFUT(self, operation_pb): + from gcloud.bigtable.instance import _process_operation + return _process_operation(operation_pb) + + def test_it(self): + from google.longrunning import operations_pb2 + from gcloud._testing import _Monkey + from gcloud.bigtable._generated_v2 import ( + bigtable_instance_admin_pb2 as messages_v2_pb) + from gcloud.bigtable import instance as MUT + + PROJECT = 'PROJECT' + INSTANCE_ID = 'instance-id' + EXPECTED_OPERATION_ID = 234 + OPERATION_NAME = ( + 'operations/projects/%s/instances/%s/operations/%d' % + (PROJECT, INSTANCE_ID, EXPECTED_OPERATION_ID)) + + current_op = operations_pb2.Operation(name=OPERATION_NAME) + + # Create mocks. + request_metadata = messages_v2_pb.CreateInstanceMetadata() + parse_pb_any_called = [] + + def mock_parse_pb_any_to_native(any_val, expected_type=None): + parse_pb_any_called.append((any_val, expected_type)) + return request_metadata + + expected_operation_begin = object() + ts_to_dt_called = [] + + def mock_pb_timestamp_to_datetime(timestamp): + ts_to_dt_called.append(timestamp) + return expected_operation_begin + + # Exectute method with mocks in place. + with _Monkey(MUT, _parse_pb_any_to_native=mock_parse_pb_any_to_native, + _pb_timestamp_to_datetime=mock_pb_timestamp_to_datetime): + operation_id, operation_begin = self._callFUT(current_op) + + # Check outputs. + self.assertEqual(operation_id, EXPECTED_OPERATION_ID) + self.assertTrue(operation_begin is expected_operation_begin) + + # Check mocks were used correctly. 
+ self.assertEqual(parse_pb_any_called, [(current_op.metadata, None)]) + self.assertEqual(ts_to_dt_called, [request_metadata.request_time]) + + def test_op_name_parsing_failure(self): + from google.longrunning import operations_pb2 + + operation_pb = operations_pb2.Operation(name='invalid') + with self.assertRaises(ValueError): + self._callFUT(operation_pb) + + +class _Client(object): + + def __init__(self, project, timeout_seconds=None): + self.project = project + self.project_name = 'projects/' + self.project + self.timeout_seconds = timeout_seconds + + def copy(self): + from copy import deepcopy + return deepcopy(self) + + def __eq__(self, other): + return (other.project == self.project and + other.project_name == self.project_name and + other.timeout_seconds == self.timeout_seconds) diff --git a/gcloud/bigtable/test_table.py b/gcloud/bigtable/test_table.py index a6339329dfca..cd47ada3ba22 100644 --- a/gcloud/bigtable/test_table.py +++ b/gcloud/bigtable/test_table.py @@ -18,6 +18,12 @@ class TestTable(unittest2.TestCase): + PROJECT_ID = 'project-id' + INSTANCE_ID = 'instance-id' + INSTANCE_NAME = ('projects/' + PROJECT_ID + '/instances/' + INSTANCE_ID) + TABLE_ID = 'table-id' + TABLE_NAME = INSTANCE_NAME + '/tables/' + TABLE_ID + TIMEOUT_SECONDS = 1333 ROW_KEY = b'row-key' FAMILY_NAME = u'family' QUALIFIER = b'qualifier' @@ -33,19 +39,19 @@ def _makeOne(self, *args, **kwargs): def test_constructor(self): table_id = 'table-id' - cluster = object() + instance = object() - table = self._makeOne(table_id, cluster) + table = self._makeOne(table_id, instance) self.assertEqual(table.table_id, table_id) - self.assertTrue(table._cluster is cluster) + self.assertTrue(table._instance is instance) def test_name_property(self): table_id = 'table-id' - cluster_name = 'cluster_name' + instance_name = 'instance_name' - cluster = _Cluster(cluster_name) - table = self._makeOne(table_id, cluster) - expected_name = cluster_name + '/tables/' + table_id + instance = 
_Instance(instance_name) + table = self._makeOne(table_id, instance) + expected_name = instance_name + '/tables/' + table_id self.assertEqual(table.name, expected_name) def test_column_family_factory(self): @@ -100,51 +106,40 @@ def test_row_factory_append(self): self.assertEqual(row._table, table) def test_row_factory_failure(self): - table_id = 'table-id' - table = self._makeOne(table_id, None) + table = self._makeOne(self.TABLE_ID, None) with self.assertRaises(ValueError): table.row(b'row_key', filter_=object(), append=True) def test___eq__(self): - table_id = 'table_id' - cluster = object() - table1 = self._makeOne(table_id, cluster) - table2 = self._makeOne(table_id, cluster) + instance = object() + table1 = self._makeOne(self.TABLE_ID, instance) + table2 = self._makeOne(self.TABLE_ID, instance) self.assertEqual(table1, table2) def test___eq__type_differ(self): - table1 = self._makeOne('table_id', None) + table1 = self._makeOne(self.TABLE_ID, None) table2 = object() self.assertNotEqual(table1, table2) def test___ne__same_value(self): - table_id = 'table_id' - cluster = object() - table1 = self._makeOne(table_id, cluster) - table2 = self._makeOne(table_id, cluster) + instance = object() + table1 = self._makeOne(self.TABLE_ID, instance) + table2 = self._makeOne(self.TABLE_ID, instance) comparison_val = (table1 != table2) self.assertFalse(comparison_val) def test___ne__(self): - table1 = self._makeOne('table_id1', 'cluster1') - table2 = self._makeOne('table_id2', 'cluster2') + table1 = self._makeOne('table_id1', 'instance1') + table2 = self._makeOne('table_id2', 'instance2') self.assertNotEqual(table1, table2) def _create_test_helper(self, initial_split_keys): from gcloud._helpers import _to_bytes from gcloud.bigtable._testing import _FakeStub - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - timeout_seconds = 150 - cluster_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id) - - client 
= _Client(timeout_seconds=timeout_seconds) - cluster = _Cluster(cluster_name, client=client) - table = self._makeOne(table_id, cluster) + client = _Client(timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._makeOne(self.TABLE_ID, instance) # Create request_pb splits_pb = [ @@ -152,8 +147,8 @@ def _create_test_helper(self, initial_split_keys): for key in initial_split_keys or ()] request_pb = _CreateTableRequestPB( initial_splits=splits_pb, - name=cluster_name, - table_id=table_id, + name=self.INSTANCE_NAME, + table_id=self.TABLE_ID, ) # Create response_pb @@ -170,7 +165,7 @@ def _create_test_helper(self, initial_split_keys): self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'CreateTable', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) @@ -185,27 +180,18 @@ def test_create_with_split_keys(self): def _list_column_families_helper(self): from gcloud.bigtable._testing import _FakeStub - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - timeout_seconds = 502 - cluster_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id) - - client = _Client(timeout_seconds=timeout_seconds) - cluster = _Cluster(cluster_name, client=client) - table = self._makeOne(table_id, cluster) + client = _Client(timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._makeOne(self.TABLE_ID, instance) # Create request_pb - table_name = cluster_name + '/tables/' + table_id - request_pb = _GetTableRequestPB(name=table_name) + request_pb = _GetTableRequestPB(name=self.TABLE_NAME) # Create response_pb - column_family_id = 'foo' + COLUMN_FAMILY_ID = 'foo' column_family = _ColumnFamilyPB() response_pb = _TablePB( - column_families={column_family_id: column_family}, + column_families={COLUMN_FAMILY_ID: column_family}, ) # Patch the stub used by the API 
method. @@ -213,7 +199,7 @@ def _list_column_families_helper(self): # Create expected_result. expected_result = { - column_family_id: table.column_family(column_family_id), + COLUMN_FAMILY_ID: table.column_family(COLUMN_FAMILY_ID), } # Perform the method and check the result. @@ -221,7 +207,7 @@ def _list_column_families_helper(self): self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'GetTable', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) @@ -232,21 +218,12 @@ def test_delete(self): from google.protobuf import empty_pb2 from gcloud.bigtable._testing import _FakeStub - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - timeout_seconds = 871 - cluster_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id) - - client = _Client(timeout_seconds=timeout_seconds) - cluster = _Cluster(cluster_name, client=client) - table = self._makeOne(table_id, cluster) + client = _Client(timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._makeOne(self.TABLE_ID, instance) # Create request_pb - table_name = cluster_name + '/tables/' + table_id - request_pb = _DeleteTableRequestPB(name=table_name) + request_pb = _DeleteTableRequestPB(name=self.TABLE_NAME) # Create response_pb response_pb = empty_pb2.Empty() @@ -262,7 +239,7 @@ def test_delete(self): self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'DeleteTable', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) @@ -271,16 +248,9 @@ def _read_row_helper(self, chunks, expected_result): from gcloud.bigtable._testing import _FakeStub from gcloud.bigtable import table as MUT - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - timeout_seconds = 596 - client = _Client(timeout_seconds=timeout_seconds) - cluster_name = ('projects/' + project_id + 
'/zones/' + zone + - '/clusters/' + cluster_id) - cluster = _Cluster(cluster_name, client=client) - table = self._makeOne(table_id, cluster) + client = _Client(timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._makeOne(self.TABLE_ID, instance) # Create request_pb request_pb = object() # Returned by our mock. @@ -305,7 +275,7 @@ def mock_create_row_request(table_name, row_key, filter_): self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'ReadRows', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) self.assertEqual(mock_created, @@ -353,16 +323,9 @@ def test_read_rows(self): from gcloud.bigtable.row_data import PartialRowsData from gcloud.bigtable import table as MUT - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - timeout_seconds = 1111 - client = _Client(timeout_seconds=timeout_seconds) - cluster_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id) - cluster = _Cluster(cluster_name, client=client) - table = self._makeOne(table_id, cluster) + client = _Client(timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._makeOne(self.TABLE_ID, instance) # Create request_pb request_pb = object() # Returned by our mock. 
@@ -394,7 +357,7 @@ def mock_create_row_request(table_name, **kwargs): self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'ReadRows', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) created_kwargs = { @@ -408,22 +371,12 @@ def mock_create_row_request(table_name, **kwargs): def test_sample_row_keys(self): from gcloud.bigtable._testing import _FakeStub - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - timeout_seconds = 1333 - - client = _Client(timeout_seconds=timeout_seconds) - cluster_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id) - cluster = _Cluster(cluster_name, client=client) - table = self._makeOne(table_id, cluster) + client = _Client(timeout_seconds=self.TIMEOUT_SECONDS) + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._makeOne(self.TABLE_ID, instance) # Create request_pb - table_name = cluster_name + '/tables/' + table_id - request_pb = _SampleRowKeysRequestPB( - table_name=table_name) + request_pb = _SampleRowKeysRequestPB(table_name=self.TABLE_NAME) # Create response_iterator response_iterator = object() # Just passed to a mock. @@ -439,7 +392,7 @@ def test_sample_row_keys(self): self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'SampleRowKeys', - (request_pb, timeout_seconds), + (request_pb, self.TIMEOUT_SECONDS), {}, )]) @@ -591,7 +544,7 @@ def _ColumnFamilyPB(*args, **kw): class _Client(object): data_stub = None - cluster_stub = None + instance_stub = None operations_stub = None table_stub = None @@ -599,7 +552,7 @@ def __init__(self, timeout_seconds=None): self.timeout_seconds = timeout_seconds -class _Cluster(object): +class _Instance(object): def __init__(self, name, client=None): self.name = name