From 14696d117808c92ba7bd1757399f825999638ca4 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Mon, 29 Mar 2021 03:05:28 -0700 Subject: [PATCH 01/10] changes without context autosynth cannot find the source of changes triggered by earlier changes in this repository, or by version upgrades to tools such as linters. --- .gitignore | 1 + docs/spanner_admin_database_v1/types.rst | 1 + docs/spanner_admin_instance_v1/types.rst | 1 + docs/spanner_v1/types.rst | 1 + .../services/database_admin/client.py | 11 +- .../services/instance_admin/client.py | 11 +- .../cloud/spanner_v1/proto/transaction.proto | 279 +++++++++++++++- google/cloud/spanner_v1/types/transaction.py | 303 +++++++++++++++++- synth.metadata | 2 +- 9 files changed, 592 insertions(+), 18 deletions(-) diff --git a/.gitignore b/.gitignore index 708cdcc9eb..b4243ced74 100644 --- a/.gitignore +++ b/.gitignore @@ -45,6 +45,7 @@ pip-log.txt # Built documentation docs/_build +bigquery/docs/generated docs.metadata # Virtual environment diff --git a/docs/spanner_admin_database_v1/types.rst b/docs/spanner_admin_database_v1/types.rst index fe6c27778b..95e1d7f88b 100644 --- a/docs/spanner_admin_database_v1/types.rst +++ b/docs/spanner_admin_database_v1/types.rst @@ -3,4 +3,5 @@ Types for Google Cloud Spanner Admin Database v1 API .. automodule:: google.cloud.spanner_admin_database_v1.types :members: + :undoc-members: :show-inheritance: diff --git a/docs/spanner_admin_instance_v1/types.rst b/docs/spanner_admin_instance_v1/types.rst index 250cf6bf9b..8f7204ebce 100644 --- a/docs/spanner_admin_instance_v1/types.rst +++ b/docs/spanner_admin_instance_v1/types.rst @@ -3,4 +3,5 @@ Types for Google Cloud Spanner Admin Instance v1 API .. automodule:: google.cloud.spanner_admin_instance_v1.types :members: + :undoc-members: :show-inheritance: diff --git a/docs/spanner_v1/types.rst b/docs/spanner_v1/types.rst index c7ff7e6c71..8678aba188 100644 --- a/docs/spanner_v1/types.rst +++ b/docs/spanner_v1/types.rst @@ -3,4 +3,5 @@ Types for Google Cloud Spanner v1 API .. automodule:: google.cloud.spanner_v1.types :members: + :undoc-members: :show-inheritance: diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/client.py b/google/cloud/spanner_admin_database_v1/services/database_admin/client.py index 83cfeb248f..ad8c1bb9b3 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/client.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/client.py @@ -1088,8 +1088,7 @@ def set_iam_policy( request = iam_policy.SetIamPolicyRequest(**request) elif not request: - # Null request, just make one. - request = iam_policy.SetIamPolicyRequest() + request = iam_policy.SetIamPolicyRequest(resource=resource,) if resource is not None: request.resource = resource @@ -1225,8 +1224,7 @@ def get_iam_policy( request = iam_policy.GetIamPolicyRequest(**request) elif not request: - # Null request, just make one. - request = iam_policy.GetIamPolicyRequest() + request = iam_policy.GetIamPolicyRequest(resource=resource,) if resource is not None: request.resource = resource @@ -1317,8 +1315,9 @@ def test_iam_permissions( request = iam_policy.TestIamPermissionsRequest(**request) elif not request: - # Null request, just make one. 
- request = iam_policy.TestIamPermissionsRequest() + request = iam_policy.TestIamPermissionsRequest( + resource=resource, permissions=permissions, + ) if resource is not None: request.resource = resource diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py index 2dc7b8e6c3..e445d0f31d 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py @@ -1185,8 +1185,7 @@ def set_iam_policy( request = iam_policy.SetIamPolicyRequest(**request) elif not request: - # Null request, just make one. - request = iam_policy.SetIamPolicyRequest() + request = iam_policy.SetIamPolicyRequest(resource=resource,) if resource is not None: request.resource = resource @@ -1318,8 +1317,7 @@ def get_iam_policy( request = iam_policy.GetIamPolicyRequest(**request) elif not request: - # Null request, just make one. - request = iam_policy.GetIamPolicyRequest() + request = iam_policy.GetIamPolicyRequest(resource=resource,) if resource is not None: request.resource = resource @@ -1407,8 +1405,9 @@ def test_iam_permissions( request = iam_policy.TestIamPermissionsRequest(**request) elif not request: - # Null request, just make one. - request = iam_policy.TestIamPermissionsRequest() + request = iam_policy.TestIamPermissionsRequest( + resource=resource, permissions=permissions, + ) if resource is not None: request.resource = resource diff --git a/google/cloud/spanner_v1/proto/transaction.proto b/google/cloud/spanner_v1/proto/transaction.proto index 5c6f494474..2cafefcb10 100644 --- a/google/cloud/spanner_v1/proto/transaction.proto +++ b/google/cloud/spanner_v1/proto/transaction.proto @@ -28,9 +28,284 @@ option java_package = "com.google.spanner.v1"; option php_namespace = "Google\\Cloud\\Spanner\\V1"; option ruby_package = "Google::Cloud::Spanner::V1"; -// TransactionOptions are used to specify different types of transactions. +// # Transactions // -// For more info, see: https://cloud.google.com/spanner/docs/reference/rest/v1/Transaction +// +// Each session can have at most one active transaction at a time (note that +// standalone reads and queries use a transaction internally and do count +// towards the one transaction limit). After the active transaction is +// completed, the session can immediately be re-used for the next transaction. +// It is not necessary to create a new session for each transaction. +// +// # Transaction Modes +// +// Cloud Spanner supports three transaction modes: +// +// 1. Locking read-write. This type of transaction is the only way +// to write data into Cloud Spanner. These transactions rely on +// pessimistic locking and, if necessary, two-phase commit. +// Locking read-write transactions may abort, requiring the +// application to retry. +// +// 2. Snapshot read-only. This transaction type provides guaranteed +// consistency across several reads, but does not allow +// writes. Snapshot read-only transactions can be configured to +// read at timestamps in the past. Snapshot read-only +// transactions do not need to be committed. +// +// 3. Partitioned DML. This type of transaction is used to execute +// a single Partitioned DML statement. Partitioned DML partitions +// the key space and runs the DML statement over each partition +// in parallel using separate, internal transactions that commit +// independently. Partitioned DML transactions do not need to be +// committed. 
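For orientation, the three modes above map onto distinct entry points of the python-spanner client that this patch documents. A minimal sketch follows; the instance, database, table, and column names are placeholders, not part of this patch:

```python
from google.cloud import spanner

client = spanner.Client()
database = client.instance("my-instance").database("my-database")

# 1. Locking read-write: the function runs inside a transaction whose
#    commit is retried by the library if it aborts.
def bump_budget(transaction):
    transaction.execute_update(
        "UPDATE Albums SET MarketingBudget = MarketingBudget + 1000 "
        "WHERE AlbumId = 1"
    )

database.run_in_transaction(bump_budget)

# 2. Snapshot read-only: lock-free reads; never committed or rolled back.
with database.snapshot() as snapshot:
    rows = list(snapshot.execute_sql("SELECT AlbumId, MarketingBudget FROM Albums"))

# 3. Partitioned DML: one statement, run over each partition in internal
#    transactions that commit independently.
row_count = database.execute_partitioned_dml(
    "UPDATE Albums SET MarketingBudget = 0 WHERE MarketingBudget IS NULL"
)
```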
+// +// For transactions that only read, snapshot read-only transactions +// provide simpler semantics and are almost always faster. In +// particular, read-only transactions do not take locks, so they do +// not conflict with read-write transactions. As a consequence of not +// taking locks, they also do not abort, so retry loops are not needed. +// +// Transactions may only read/write data in a single database. They +// may, however, read/write data in different tables within that +// database. +// +// ## Locking Read-Write Transactions +// +// Locking transactions may be used to atomically read-modify-write +// data anywhere in a database. This type of transaction is externally +// consistent. +// +// Clients should attempt to minimize the amount of time a transaction +// is active. Faster transactions commit with higher probability +// and cause less contention. Cloud Spanner attempts to keep read locks +// active as long as the transaction continues to do reads, and the +// transaction has not been terminated by +// [Commit][google.spanner.v1.Spanner.Commit] or +// [Rollback][google.spanner.v1.Spanner.Rollback]. Long periods of +// inactivity at the client may cause Cloud Spanner to release a +// transaction's locks and abort it. +// +// Conceptually, a read-write transaction consists of zero or more +// reads or SQL statements followed by +// [Commit][google.spanner.v1.Spanner.Commit]. At any time before +// [Commit][google.spanner.v1.Spanner.Commit], the client can send a +// [Rollback][google.spanner.v1.Spanner.Rollback] request to abort the +// transaction. +// +// ### Semantics +// +// Cloud Spanner can commit the transaction if all read locks it acquired +// are still valid at commit time, and it is able to acquire write +// locks for all writes. Cloud Spanner can abort the transaction for any +// reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees +// that the transaction has not modified any user data in Cloud Spanner. +// +// Unless the transaction commits, Cloud Spanner makes no guarantees about +// how long the transaction's locks were held for. It is an error to +// use Cloud Spanner locks for any sort of mutual exclusion other than +// between Cloud Spanner transactions themselves. +// +// ### Retrying Aborted Transactions +// +// When a transaction aborts, the application can choose to retry the +// whole transaction again. To maximize the chances of successfully +// committing the retry, the client should execute the retry in the +// same session as the original attempt. The original session's lock +// priority increases with each consecutive abort, meaning that each +// attempt has a slightly better chance of success than the previous. +// +// Under some circumstances (e.g., many transactions attempting to +// modify the same row(s)), a transaction can abort many times in a +// short period before successfully committing. Thus, it is not a good +// idea to cap the number of retries a transaction can attempt; +// instead, it is better to limit the total amount of wall time spent +// retrying. +// +// ### Idle Transactions +// +// A transaction is considered idle if it has no outstanding reads or +// SQL queries and has not started a read or SQL query within the last 10 +// seconds. Idle transactions can be aborted by Cloud Spanner so that they +// don't hold on to locks indefinitely. In that case, the commit will +// fail with error `ABORTED`. 
+// +// If this behavior is undesirable, periodically executing a simple +// SQL query in the transaction (e.g., `SELECT 1`) prevents the +// transaction from becoming idle. +// +// ## Snapshot Read-Only Transactions +// +// Snapshot read-only transactions provides a simpler method than +// locking read-write transactions for doing several consistent +// reads. However, this type of transaction does not support writes. +// +// Snapshot transactions do not take locks. Instead, they work by +// choosing a Cloud Spanner timestamp, then executing all reads at that +// timestamp. Since they do not acquire locks, they do not block +// concurrent read-write transactions. +// +// Unlike locking read-write transactions, snapshot read-only +// transactions never abort. They can fail if the chosen read +// timestamp is garbage collected; however, the default garbage +// collection policy is generous enough that most applications do not +// need to worry about this in practice. +// +// Snapshot read-only transactions do not need to call +// [Commit][google.spanner.v1.Spanner.Commit] or +// [Rollback][google.spanner.v1.Spanner.Rollback] (and in fact are not +// permitted to do so). +// +// To execute a snapshot transaction, the client specifies a timestamp +// bound, which tells Cloud Spanner how to choose a read timestamp. +// +// The types of timestamp bound are: +// +// - Strong (the default). +// - Bounded staleness. +// - Exact staleness. +// +// If the Cloud Spanner database to be read is geographically distributed, +// stale read-only transactions can execute more quickly than strong +// or read-write transaction, because they are able to execute far +// from the leader replica. +// +// Each type of timestamp bound is discussed in detail below. +// +// ### Strong +// +// Strong reads are guaranteed to see the effects of all transactions +// that have committed before the start of the read. Furthermore, all +// rows yielded by a single read are consistent with each other -- if +// any part of the read observes a transaction, all parts of the read +// see the transaction. +// +// Strong reads are not repeatable: two consecutive strong read-only +// transactions might return inconsistent results if there are +// concurrent writes. If consistency across reads is required, the +// reads should be executed within a transaction or at an exact read +// timestamp. +// +// See [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong]. +// +// ### Exact Staleness +// +// These timestamp bounds execute reads at a user-specified +// timestamp. Reads at a timestamp are guaranteed to see a consistent +// prefix of the global transaction history: they observe +// modifications done by all transactions with a commit timestamp <= +// the read timestamp, and observe none of the modifications done by +// transactions with a larger commit timestamp. They will block until +// all conflicting transactions that may be assigned commit timestamps +// <= the read timestamp have finished. +// +// The timestamp can either be expressed as an absolute Cloud Spanner commit +// timestamp or a staleness relative to the current time. +// +// These modes do not require a "negotiation phase" to pick a +// timestamp. As a result, they execute slightly faster than the +// equivalent boundedly stale concurrency modes. On the other hand, +// boundedly stale reads usually return fresher results. 
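In the Python client, both exact timestamp bounds are keyword arguments of `Database.snapshot`. A sketch under the same placeholder names as above:

```python
import datetime

from google.cloud import spanner

database = spanner.Client().instance("my-instance").database("my-database")

# Relative bound: execute all reads at a timestamp exactly 15 seconds old.
with database.snapshot(exact_staleness=datetime.timedelta(seconds=15)) as snapshot:
    rows = list(snapshot.execute_sql("SELECT AlbumId, AlbumTitle FROM Albums"))

# Absolute bound: execute all reads at a specific commit timestamp, which
# must still fall within the version-GC window (about one hour by default).
read_at = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(minutes=10)
with database.snapshot(read_timestamp=read_at) as snapshot:
    rows = list(snapshot.execute_sql("SELECT AlbumId, AlbumTitle FROM Albums"))
```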
+// +// See [TransactionOptions.ReadOnly.read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.read_timestamp] and +// [TransactionOptions.ReadOnly.exact_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact_staleness]. +// +// ### Bounded Staleness +// +// Bounded staleness modes allow Cloud Spanner to pick the read timestamp, +// subject to a user-provided staleness bound. Cloud Spanner chooses the +// newest timestamp within the staleness bound that allows execution +// of the reads at the closest available replica without blocking. +// +// All rows yielded are consistent with each other -- if any part of +// the read observes a transaction, all parts of the read see the +// transaction. Boundedly stale reads are not repeatable: two stale +// reads, even if they use the same staleness bound, can execute at +// different timestamps and thus return inconsistent results. +// +// Boundedly stale reads execute in two phases: the first phase +// negotiates a timestamp among all replicas needed to serve the +// read. In the second phase, reads are executed at the negotiated +// timestamp. +// +// As a result of the two phase execution, bounded staleness reads are +// usually a little slower than comparable exact staleness +// reads. However, they are typically able to return fresher +// results, and are more likely to execute at the closest replica. +// +// Because the timestamp negotiation requires up-front knowledge of +// which rows will be read, it can only be used with single-use +// read-only transactions. +// +// See [TransactionOptions.ReadOnly.max_staleness][google.spanner.v1.TransactionOptions.ReadOnly.max_staleness] and +// [TransactionOptions.ReadOnly.min_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min_read_timestamp]. +// +// ### Old Read Timestamps and Garbage Collection +// +// Cloud Spanner continuously garbage collects deleted and overwritten data +// in the background to reclaim storage space. This process is known +// as "version GC". By default, version GC reclaims versions after they +// are one hour old. Because of this, Cloud Spanner cannot perform reads +// at read timestamps more than one hour in the past. This +// restriction also applies to in-progress reads and/or SQL queries whose +// timestamp become too old while executing. Reads and SQL queries with +// too-old read timestamps fail with the error `FAILED_PRECONDITION`. +// +// ## Partitioned DML Transactions +// +// Partitioned DML transactions are used to execute DML statements with a +// different execution strategy that provides different, and often better, +// scalability properties for large, table-wide operations than DML in a +// ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, +// should prefer using ReadWrite transactions. +// +// Partitioned DML partitions the keyspace and runs the DML statement on each +// partition in separate, internal transactions. These transactions commit +// automatically when complete, and run independently from one another. +// +// To reduce lock contention, this execution strategy only acquires read locks +// on rows that match the WHERE clause of the statement. Additionally, the +// smaller per-partition transactions hold locks for less time. +// +// That said, Partitioned DML is not a drop-in replacement for standard DML used +// in ReadWrite transactions. +// +// - The DML statement must be fully-partitionable. 
Specifically, the statement +// must be expressible as the union of many statements which each access only +// a single row of the table. +// +// - The statement is not applied atomically to all rows of the table. Rather, +// the statement is applied atomically to partitions of the table, in +// independent transactions. Secondary index rows are updated atomically +// with the base table rows. +// +// - Partitioned DML does not guarantee exactly-once execution semantics +// against a partition. The statement will be applied at least once to each +// partition. It is strongly recommended that the DML statement should be +// idempotent to avoid unexpected results. For instance, it is potentially +// dangerous to run a statement such as +// `UPDATE table SET column = column + 1` as it could be run multiple times +// against some rows. +// +// - The partitions are committed automatically - there is no support for +// Commit or Rollback. If the call returns an error, or if the client issuing +// the ExecuteSql call dies, it is possible that some rows had the statement +// executed on them successfully. It is also possible that statement was +// never executed against other rows. +// +// - Partitioned DML transactions may only contain the execution of a single +// DML statement via ExecuteSql or ExecuteStreamingSql. +// +// - If any error is encountered during the execution of the partitioned DML +// operation (for instance, a UNIQUE INDEX violation, division by zero, or a +// value that cannot be stored due to schema constraints), then the +// operation is stopped at that point and an error is returned. It is +// possible that at this point, some partitions have been committed (or even +// committed multiple times), and other partitions have not been run at all. +// +// Given the above, Partitioned DML is good fit for large, database-wide, +// operations that are idempotent, such as deleting old rows from a very large +// table. message TransactionOptions { // Message type to initiate a read-write transaction. Currently this // transaction type has no options. diff --git a/google/cloud/spanner_v1/types/transaction.py b/google/cloud/spanner_v1/types/transaction.py index e20c6ad7b4..744fe2597a 100644 --- a/google/cloud/spanner_v1/types/transaction.py +++ b/google/cloud/spanner_v1/types/transaction.py @@ -29,9 +29,306 @@ class TransactionOptions(proto.Message): - r"""TransactionOptions are used to specify different types of transactions. - - For more info, see: https://cloud.google.com/spanner/docs/reference/rest/v1/Transaction + r"""Transactions + ============ + + Each session can have at most one active transaction at a time (note + that standalone reads and queries use a transaction internally and + do count towards the one transaction limit). After the active + transaction is completed, the session can immediately be re-used for + the next transaction. It is not necessary to create a new session + for each transaction. + + Transaction Modes + ================= + + Cloud Spanner supports three transaction modes: + + 1. Locking read-write. This type of transaction is the only way to + write data into Cloud Spanner. These transactions rely on + pessimistic locking and, if necessary, two-phase commit. Locking + read-write transactions may abort, requiring the application to + retry. + + 2. Snapshot read-only. This transaction type provides guaranteed + consistency across several reads, but does not allow writes. 
+ Snapshot read-only transactions can be configured to read at + timestamps in the past. Snapshot read-only transactions do not + need to be committed. + + 3. Partitioned DML. This type of transaction is used to execute a + single Partitioned DML statement. Partitioned DML partitions the + key space and runs the DML statement over each partition in + parallel using separate, internal transactions that commit + independently. Partitioned DML transactions do not need to be + committed. + + For transactions that only read, snapshot read-only transactions + provide simpler semantics and are almost always faster. In + particular, read-only transactions do not take locks, so they do not + conflict with read-write transactions. As a consequence of not + taking locks, they also do not abort, so retry loops are not needed. + + Transactions may only read/write data in a single database. They + may, however, read/write data in different tables within that + database. + + Locking Read-Write Transactions + ------------------------------- + + Locking transactions may be used to atomically read-modify-write + data anywhere in a database. This type of transaction is externally + consistent. + + Clients should attempt to minimize the amount of time a transaction + is active. Faster transactions commit with higher probability and + cause less contention. Cloud Spanner attempts to keep read locks + active as long as the transaction continues to do reads, and the + transaction has not been terminated by + [Commit][google.spanner.v1.Spanner.Commit] or + [Rollback][google.spanner.v1.Spanner.Rollback]. Long periods of + inactivity at the client may cause Cloud Spanner to release a + transaction's locks and abort it. + + Conceptually, a read-write transaction consists of zero or more + reads or SQL statements followed by + [Commit][google.spanner.v1.Spanner.Commit]. At any time before + [Commit][google.spanner.v1.Spanner.Commit], the client can send a + [Rollback][google.spanner.v1.Spanner.Rollback] request to abort the + transaction. + + Semantics + ~~~~~~~~~ + + Cloud Spanner can commit the transaction if all read locks it + acquired are still valid at commit time, and it is able to acquire + write locks for all writes. Cloud Spanner can abort the transaction + for any reason. If a commit attempt returns ``ABORTED``, Cloud + Spanner guarantees that the transaction has not modified any user + data in Cloud Spanner. + + Unless the transaction commits, Cloud Spanner makes no guarantees + about how long the transaction's locks were held for. It is an error + to use Cloud Spanner locks for any sort of mutual exclusion other + than between Cloud Spanner transactions themselves. + + Retrying Aborted Transactions + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + When a transaction aborts, the application can choose to retry the + whole transaction again. To maximize the chances of successfully + committing the retry, the client should execute the retry in the + same session as the original attempt. The original session's lock + priority increases with each consecutive abort, meaning that each + attempt has a slightly better chance of success than the previous. + + Under some circumstances (e.g., many transactions attempting to + modify the same row(s)), a transaction can abort many times in a + short period before successfully committing. Thus, it is not a good + idea to cap the number of retries a transaction can attempt; + instead, it is better to limit the total amount of wall time spent + retrying. 
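This library implements that retry loop for you: `Database.run_in_transaction` re-runs the supplied function when the commit aborts. A minimal sketch (the `Accounts` schema is a placeholder):

```python
from google.cloud import spanner

database = spanner.Client().instance("my-instance").database("my-database")

def transfer(transaction):
    transaction.execute_update(
        "UPDATE Accounts SET Balance = Balance - 10 WHERE AccountId = 1"
    )
    transaction.execute_update(
        "UPDATE Accounts SET Balance = Balance + 10 WHERE AccountId = 2"
    )

# ABORTED commits are retried in the same session, so each attempt
# inherits the session's increased lock priority; retries are bounded
# by a total wall-clock deadline rather than a fixed attempt count.
database.run_in_transaction(transfer)
```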
+ + Idle Transactions + ~~~~~~~~~~~~~~~~~ + + A transaction is considered idle if it has no outstanding reads or + SQL queries and has not started a read or SQL query within the last + 10 seconds. Idle transactions can be aborted by Cloud Spanner so + that they don't hold on to locks indefinitely. In that case, the + commit will fail with error ``ABORTED``. + + If this behavior is undesirable, periodically executing a simple SQL + query in the transaction (e.g., ``SELECT 1``) prevents the + transaction from becoming idle. + + Snapshot Read-Only Transactions + ------------------------------- + + Snapshot read-only transactions provides a simpler method than + locking read-write transactions for doing several consistent reads. + However, this type of transaction does not support writes. + + Snapshot transactions do not take locks. Instead, they work by + choosing a Cloud Spanner timestamp, then executing all reads at that + timestamp. Since they do not acquire locks, they do not block + concurrent read-write transactions. + + Unlike locking read-write transactions, snapshot read-only + transactions never abort. They can fail if the chosen read timestamp + is garbage collected; however, the default garbage collection policy + is generous enough that most applications do not need to worry about + this in practice. + + Snapshot read-only transactions do not need to call + [Commit][google.spanner.v1.Spanner.Commit] or + [Rollback][google.spanner.v1.Spanner.Rollback] (and in fact are not + permitted to do so). + + To execute a snapshot transaction, the client specifies a timestamp + bound, which tells Cloud Spanner how to choose a read timestamp. + + The types of timestamp bound are: + + - Strong (the default). + - Bounded staleness. + - Exact staleness. + + If the Cloud Spanner database to be read is geographically + distributed, stale read-only transactions can execute more quickly + than strong or read-write transaction, because they are able to + execute far from the leader replica. + + Each type of timestamp bound is discussed in detail below. + + Strong + ~~~~~~ + + Strong reads are guaranteed to see the effects of all transactions + that have committed before the start of the read. Furthermore, all + rows yielded by a single read are consistent with each other -- if + any part of the read observes a transaction, all parts of the read + see the transaction. + + Strong reads are not repeatable: two consecutive strong read-only + transactions might return inconsistent results if there are + concurrent writes. If consistency across reads is required, the + reads should be executed within a transaction or at an exact read + timestamp. + + See + [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong]. + + Exact Staleness + ~~~~~~~~~~~~~~~ + + These timestamp bounds execute reads at a user-specified timestamp. + Reads at a timestamp are guaranteed to see a consistent prefix of + the global transaction history: they observe modifications done by + all transactions with a commit timestamp <= the read timestamp, and + observe none of the modifications done by transactions with a larger + commit timestamp. They will block until all conflicting transactions + that may be assigned commit timestamps <= the read timestamp have + finished. + + The timestamp can either be expressed as an absolute Cloud Spanner + commit timestamp or a staleness relative to the current time. + + These modes do not require a "negotiation phase" to pick a + timestamp. 
As a result, they execute slightly faster than the + equivalent boundedly stale concurrency modes. On the other hand, + boundedly stale reads usually return fresher results. + + See + [TransactionOptions.ReadOnly.read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.read_timestamp] + and + [TransactionOptions.ReadOnly.exact_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact_staleness]. + + Bounded Staleness + ~~~~~~~~~~~~~~~~~ + + Bounded staleness modes allow Cloud Spanner to pick the read + timestamp, subject to a user-provided staleness bound. Cloud Spanner + chooses the newest timestamp within the staleness bound that allows + execution of the reads at the closest available replica without + blocking. + + All rows yielded are consistent with each other -- if any part of + the read observes a transaction, all parts of the read see the + transaction. Boundedly stale reads are not repeatable: two stale + reads, even if they use the same staleness bound, can execute at + different timestamps and thus return inconsistent results. + + Boundedly stale reads execute in two phases: the first phase + negotiates a timestamp among all replicas needed to serve the read. + In the second phase, reads are executed at the negotiated timestamp. + + As a result of the two phase execution, bounded staleness reads are + usually a little slower than comparable exact staleness reads. + However, they are typically able to return fresher results, and are + more likely to execute at the closest replica. + + Because the timestamp negotiation requires up-front knowledge of + which rows will be read, it can only be used with single-use + read-only transactions. + + See + [TransactionOptions.ReadOnly.max_staleness][google.spanner.v1.TransactionOptions.ReadOnly.max_staleness] + and + [TransactionOptions.ReadOnly.min_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min_read_timestamp]. + + Old Read Timestamps and Garbage Collection + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Cloud Spanner continuously garbage collects deleted and overwritten + data in the background to reclaim storage space. This process is + known as "version GC". By default, version GC reclaims versions + after they are one hour old. Because of this, Cloud Spanner cannot + perform reads at read timestamps more than one hour in the past. + This restriction also applies to in-progress reads and/or SQL + queries whose timestamp become too old while executing. Reads and + SQL queries with too-old read timestamps fail with the error + ``FAILED_PRECONDITION``. + + Partitioned DML Transactions + ---------------------------- + + Partitioned DML transactions are used to execute DML statements with + a different execution strategy that provides different, and often + better, scalability properties for large, table-wide operations than + DML in a ReadWrite transaction. Smaller scoped statements, such as + an OLTP workload, should prefer using ReadWrite transactions. + + Partitioned DML partitions the keyspace and runs the DML statement + on each partition in separate, internal transactions. These + transactions commit automatically when complete, and run + independently from one another. + + To reduce lock contention, this execution strategy only acquires + read locks on rows that match the WHERE clause of the statement. + Additionally, the smaller per-partition transactions hold locks for + less time. + + That said, Partitioned DML is not a drop-in replacement for standard + DML used in ReadWrite transactions. 
+ + - The DML statement must be fully-partitionable. Specifically, the + statement must be expressible as the union of many statements + which each access only a single row of the table. + + - The statement is not applied atomically to all rows of the table. + Rather, the statement is applied atomically to partitions of the + table, in independent transactions. Secondary index rows are + updated atomically with the base table rows. + + - Partitioned DML does not guarantee exactly-once execution + semantics against a partition. The statement will be applied at + least once to each partition. It is strongly recommended that the + DML statement should be idempotent to avoid unexpected results. + For instance, it is potentially dangerous to run a statement such + as ``UPDATE table SET column = column + 1`` as it could be run + multiple times against some rows. + + - The partitions are committed automatically - there is no support + for Commit or Rollback. If the call returns an error, or if the + client issuing the ExecuteSql call dies, it is possible that some + rows had the statement executed on them successfully. It is also + possible that statement was never executed against other rows. + + - Partitioned DML transactions may only contain the execution of a + single DML statement via ExecuteSql or ExecuteStreamingSql. + + - If any error is encountered during the execution of the + partitioned DML operation (for instance, a UNIQUE INDEX + violation, division by zero, or a value that cannot be stored due + to schema constraints), then the operation is stopped at that + point and an error is returned. It is possible that at this + point, some partitions have been committed (or even committed + multiple times), and other partitions have not been run at all. + + Given the above, Partitioned DML is good fit for large, + database-wide, operations that are idempotent, such as deleting old + rows from a very large table. 
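This execution strategy is surfaced in the library as `Database.execute_partitioned_dml`. A minimal sketch with placeholder table and column names; note the statement is idempotent, as recommended above:

```python
import datetime

from google.cloud import spanner

database = spanner.Client().instance("my-instance").database("my-database")

# An idempotent, table-wide delete: safe even if some partition's
# statement is applied more than once. The return value is a lower
# bound on the number of rows modified.
row_count = database.execute_partitioned_dml(
    "DELETE FROM Events WHERE EventDate < @cutoff",
    params={"cutoff": datetime.date(2020, 1, 1)},
    param_types={"cutoff": spanner.param_types.DATE},
)
```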
Attributes: read_write (google.cloud.spanner_v1.types.TransactionOptions.ReadWrite): diff --git a/synth.metadata b/synth.metadata index 72c4d0ff71..eff510075a 100644 --- a/synth.metadata +++ b/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-spanner.git", - "sha": "5ca63407847ad615dc51beaaaa7f16640daf0e23" + "sha": "75f834097a2753d9f22d6a9023e198f39fd0c086" } }, { From a7cdae36d7fd5cbe99cfa8e94ef4cc0543362cf3 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Mon, 29 Mar 2021 03:13:26 -0700 Subject: [PATCH 02/10] chore: update gapic-generator-python to 0.40.11 PiperOrigin-RevId: 359562873 Source-Author: Google APIs Source-Date: Thu Feb 25 10:52:32 2021 -0800 Source-Repo: googleapis/googleapis Source-Sha: 07932bb995e7dc91b43620ea8402c6668c7d102c Source-Link: https://github.com/googleapis/googleapis/commit/07932bb995e7dc91b43620ea8402c6668c7d102c --- .../services/database_admin/async_client.py | 32 +- .../services/database_admin/client.py | 8 +- .../services/instance_admin/async_client.py | 32 +- .../services/instance_admin/client.py | 8 +- .../services/spanner/async_client.py | 32 +- synth.metadata | 4 +- .../spanner_admin_database_v1/__init__.py | 15 + .../test_database_admin.py | 290 +++++++++++++++++- .../spanner_admin_instance_v1/__init__.py | 15 + .../test_instance_admin.py | 176 ++++++++++- tests/unit/gapic/spanner_v1/__init__.py | 15 + tests/unit/gapic/spanner_v1/test_spanner.py | 256 +++++++++++++++- 12 files changed, 859 insertions(+), 24 deletions(-) diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py b/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py index 31b97af061..bcc9da15e3 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py @@ -103,8 +103,36 @@ class DatabaseAdminAsyncClient: DatabaseAdminClient.parse_common_location_path ) - from_service_account_info = DatabaseAdminClient.from_service_account_info - from_service_account_file = DatabaseAdminClient.from_service_account_file + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DatabaseAdminAsyncClient: The constructed client. + """ + return DatabaseAdminClient.from_service_account_info.__func__(DatabaseAdminAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DatabaseAdminAsyncClient: The constructed client. 
+ """ + return DatabaseAdminClient.from_service_account_file.__func__(DatabaseAdminAsyncClient, filename, *args, **kwargs) # type: ignore + from_service_account_json = from_service_account_file @property diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/client.py b/google/cloud/spanner_admin_database_v1/services/database_admin/client.py index ad8c1bb9b3..45db079111 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/client.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/client.py @@ -1088,7 +1088,7 @@ def set_iam_policy( request = iam_policy.SetIamPolicyRequest(**request) elif not request: - request = iam_policy.SetIamPolicyRequest(resource=resource,) + request = iam_policy.SetIamPolicyRequest() if resource is not None: request.resource = resource @@ -1224,7 +1224,7 @@ def get_iam_policy( request = iam_policy.GetIamPolicyRequest(**request) elif not request: - request = iam_policy.GetIamPolicyRequest(resource=resource,) + request = iam_policy.GetIamPolicyRequest() if resource is not None: request.resource = resource @@ -1315,9 +1315,7 @@ def test_iam_permissions( request = iam_policy.TestIamPermissionsRequest(**request) elif not request: - request = iam_policy.TestIamPermissionsRequest( - resource=resource, permissions=permissions, - ) + request = iam_policy.TestIamPermissionsRequest() if resource is not None: request.resource = resource diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py index a83b1a2c1d..2a2ac56db3 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py @@ -106,8 +106,36 @@ class InstanceAdminAsyncClient: InstanceAdminClient.parse_common_location_path ) - from_service_account_info = InstanceAdminClient.from_service_account_info - from_service_account_file = InstanceAdminClient.from_service_account_file + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + InstanceAdminAsyncClient: The constructed client. + """ + return InstanceAdminClient.from_service_account_info.__func__(InstanceAdminAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + InstanceAdminAsyncClient: The constructed client. 
+ """ + return InstanceAdminClient.from_service_account_file.__func__(InstanceAdminAsyncClient, filename, *args, **kwargs) # type: ignore + from_service_account_json = from_service_account_file @property diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py index e445d0f31d..0985044bc2 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py @@ -1185,7 +1185,7 @@ def set_iam_policy( request = iam_policy.SetIamPolicyRequest(**request) elif not request: - request = iam_policy.SetIamPolicyRequest(resource=resource,) + request = iam_policy.SetIamPolicyRequest() if resource is not None: request.resource = resource @@ -1317,7 +1317,7 @@ def get_iam_policy( request = iam_policy.GetIamPolicyRequest(**request) elif not request: - request = iam_policy.GetIamPolicyRequest(resource=resource,) + request = iam_policy.GetIamPolicyRequest() if resource is not None: request.resource = resource @@ -1405,9 +1405,7 @@ def test_iam_permissions( request = iam_policy.TestIamPermissionsRequest(**request) elif not request: - request = iam_policy.TestIamPermissionsRequest( - resource=resource, permissions=permissions, - ) + request = iam_policy.TestIamPermissionsRequest() if resource is not None: request.resource = resource diff --git a/google/cloud/spanner_v1/services/spanner/async_client.py b/google/cloud/spanner_v1/services/spanner/async_client.py index a4a188bc97..becb983ed7 100644 --- a/google/cloud/spanner_v1/services/spanner/async_client.py +++ b/google/cloud/spanner_v1/services/spanner/async_client.py @@ -79,8 +79,36 @@ class SpannerAsyncClient: common_location_path = staticmethod(SpannerClient.common_location_path) parse_common_location_path = staticmethod(SpannerClient.parse_common_location_path) - from_service_account_info = SpannerClient.from_service_account_info - from_service_account_file = SpannerClient.from_service_account_file + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SpannerAsyncClient: The constructed client. + """ + return SpannerClient.from_service_account_info.__func__(SpannerAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SpannerAsyncClient: The constructed client. 
+ """ + return SpannerClient.from_service_account_file.__func__(SpannerAsyncClient, filename, *args, **kwargs) # type: ignore + from_service_account_json = from_service_account_file @property diff --git a/synth.metadata b/synth.metadata index eff510075a..fd1a836d0b 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,8 +11,8 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "f829b1334cce86aa3738f3c0698d814b56664445", - "internalRef": "358725120" + "sha": "07932bb995e7dc91b43620ea8402c6668c7d102c", + "internalRef": "359562873" } }, { diff --git a/tests/unit/gapic/spanner_admin_database_v1/__init__.py b/tests/unit/gapic/spanner_admin_database_v1/__init__.py index 8b13789179..42ffdf2bc4 100644 --- a/tests/unit/gapic/spanner_admin_database_v1/__init__.py +++ b/tests/unit/gapic/spanner_admin_database_v1/__init__.py @@ -1 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py b/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py index 86eba5e283..1906328473 100644 --- a/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py +++ b/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py @@ -104,15 +104,19 @@ def test__get_default_mtls_endpoint(): ) -def test_database_admin_client_from_service_account_info(): +@pytest.mark.parametrize( + "client_class", [DatabaseAdminClient, DatabaseAdminAsyncClient,] +) +def test_database_admin_client_from_service_account_info(client_class): creds = credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = DatabaseAdminClient.from_service_account_info(info) + client = client_class.from_service_account_info(info) assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "spanner.googleapis.com:443" @@ -128,9 +132,11 @@ def test_database_admin_client_from_service_account_file(client_class): factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) client = client_class.from_service_account_json("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "spanner.googleapis.com:443" @@ -493,6 +499,22 @@ def test_list_databases_from_dict(): test_list_databases(request_type=dict) +def test_list_databases_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
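+    # Patching "__call__" on the type of the cached gRPC callable intercepts
+    # the invocation in-process; the recorded mock call is then inspected to
+    # verify that a default request message was constructed.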
+ with mock.patch.object(type(client.transport.list_databases), "__call__") as call: + client.list_databases() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.ListDatabasesRequest() + + @pytest.mark.asyncio async def test_list_databases_async( transport: str = "grpc_asyncio", @@ -842,6 +864,22 @@ def test_create_database_from_dict(): test_create_database(request_type=dict) +def test_create_database_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_database), "__call__") as call: + client.create_database() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.CreateDatabaseRequest() + + @pytest.mark.asyncio async def test_create_database_async( transport: str = "grpc_asyncio", @@ -1052,6 +1090,22 @@ def test_get_database_from_dict(): test_get_database(request_type=dict) +def test_get_database_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_database), "__call__") as call: + client.get_database() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.GetDatabaseRequest() + + @pytest.mark.asyncio async def test_get_database_async( transport: str = "grpc_asyncio", @@ -1252,6 +1306,24 @@ def test_update_database_ddl_from_dict(): test_update_database_ddl(request_type=dict) +def test_update_database_ddl_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_database_ddl), "__call__" + ) as call: + client.update_database_ddl() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.UpdateDatabaseDdlRequest() + + @pytest.mark.asyncio async def test_update_database_ddl_async( transport: str = "grpc_asyncio", @@ -1461,6 +1533,22 @@ def test_drop_database_from_dict(): test_drop_database(request_type=dict) +def test_drop_database_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.drop_database), "__call__") as call: + client.drop_database() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.DropDatabaseRequest() + + @pytest.mark.asyncio async def test_drop_database_async( transport: str = "grpc_asyncio", @@ -1647,6 +1735,22 @@ def test_get_database_ddl_from_dict(): test_get_database_ddl(request_type=dict) +def test_get_database_ddl_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_database_ddl), "__call__") as call: + client.get_database_ddl() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.GetDatabaseDdlRequest() + + @pytest.mark.asyncio async def test_get_database_ddl_async( transport: str = "grpc_asyncio", @@ -1843,6 +1947,22 @@ def test_set_iam_policy_from_dict(): test_set_iam_policy(request_type=dict) +def test_set_iam_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + client.set_iam_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.SetIamPolicyRequest() + + @pytest.mark.asyncio async def test_set_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy.SetIamPolicyRequest @@ -2050,6 +2170,22 @@ def test_get_iam_policy_from_dict(): test_get_iam_policy(request_type=dict) +def test_get_iam_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + client.get_iam_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.GetIamPolicyRequest() + + @pytest.mark.asyncio async def test_get_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy.GetIamPolicyRequest @@ -2259,6 +2395,24 @@ def test_test_iam_permissions_from_dict(): test_test_iam_permissions(request_type=dict) +def test_test_iam_permissions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + client.test_iam_permissions() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.TestIamPermissionsRequest() + + @pytest.mark.asyncio async def test_test_iam_permissions_async( transport: str = "grpc_asyncio", request_type=iam_policy.TestIamPermissionsRequest @@ -2487,6 +2641,22 @@ def test_create_backup_from_dict(): test_create_backup(request_type=dict) +def test_create_backup_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + client.create_backup() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == gsad_backup.CreateBackupRequest() + + @pytest.mark.asyncio async def test_create_backup_async( transport: str = "grpc_asyncio", request_type=gsad_backup.CreateBackupRequest @@ -2710,6 +2880,22 @@ def test_get_backup_from_dict(): test_get_backup(request_type=dict) +def test_get_backup_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + client.get_backup() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == backup.GetBackupRequest() + + @pytest.mark.asyncio async def test_get_backup_async( transport: str = "grpc_asyncio", request_type=backup.GetBackupRequest @@ -2925,6 +3111,22 @@ def test_update_backup_from_dict(): test_update_backup(request_type=dict) +def test_update_backup_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + client.update_backup() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == gsad_backup.UpdateBackupRequest() + + @pytest.mark.asyncio async def test_update_backup_async( transport: str = "grpc_asyncio", request_type=gsad_backup.UpdateBackupRequest @@ -3137,6 +3339,22 @@ def test_delete_backup_from_dict(): test_delete_backup(request_type=dict) +def test_delete_backup_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + client.delete_backup() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == backup.DeleteBackupRequest() + + @pytest.mark.asyncio async def test_delete_backup_async( transport: str = "grpc_asyncio", request_type=backup.DeleteBackupRequest @@ -3320,6 +3538,22 @@ def test_list_backups_from_dict(): test_list_backups(request_type=dict) +def test_list_backups_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + client.list_backups() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == backup.ListBackupsRequest() + + @pytest.mark.asyncio async def test_list_backups_async( transport: str = "grpc_asyncio", request_type=backup.ListBackupsRequest @@ -3622,6 +3856,22 @@ def test_restore_database_from_dict(): test_restore_database(request_type=dict) +def test_restore_database_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.restore_database), "__call__") as call: + client.restore_database() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.RestoreDatabaseRequest() + + @pytest.mark.asyncio async def test_restore_database_async( transport: str = "grpc_asyncio", @@ -3839,6 +4089,24 @@ def test_list_database_operations_from_dict(): test_list_database_operations(request_type=dict) +def test_list_database_operations_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_database_operations), "__call__" + ) as call: + client.list_database_operations() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_database_admin.ListDatabaseOperationsRequest() + + @pytest.mark.asyncio async def test_list_database_operations_async( transport: str = "grpc_asyncio", @@ -4203,6 +4471,24 @@ def test_list_backup_operations_from_dict(): test_list_backup_operations(request_type=dict) +def test_list_backup_operations_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatabaseAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_backup_operations), "__call__" + ) as call: + client.list_backup_operations() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == backup.ListBackupOperationsRequest() + + @pytest.mark.asyncio async def test_list_backup_operations_async( transport: str = "grpc_asyncio", request_type=backup.ListBackupOperationsRequest diff --git a/tests/unit/gapic/spanner_admin_instance_v1/__init__.py b/tests/unit/gapic/spanner_admin_instance_v1/__init__.py index 8b13789179..42ffdf2bc4 100644 --- a/tests/unit/gapic/spanner_admin_instance_v1/__init__.py +++ b/tests/unit/gapic/spanner_admin_instance_v1/__init__.py @@ -1 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py b/tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py index e2caceee98..b64c5eca33 100644 --- a/tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py +++ b/tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py @@ -98,15 +98,19 @@ def test__get_default_mtls_endpoint(): ) -def test_instance_admin_client_from_service_account_info(): +@pytest.mark.parametrize( + "client_class", [InstanceAdminClient, InstanceAdminAsyncClient,] +) +def test_instance_admin_client_from_service_account_info(client_class): creds = credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = InstanceAdminClient.from_service_account_info(info) + client = client_class.from_service_account_info(info) assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "spanner.googleapis.com:443" @@ -122,9 +126,11 @@ def test_instance_admin_client_from_service_account_file(client_class): factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) client = client_class.from_service_account_json("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "spanner.googleapis.com:443" @@ -490,6 +496,24 @@ def test_list_instance_configs_from_dict(): test_list_instance_configs(request_type=dict) +def test_list_instance_configs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_instance_configs), "__call__" + ) as call: + client.list_instance_configs() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.ListInstanceConfigsRequest() + + @pytest.mark.asyncio async def test_list_instance_configs_async( transport: str = "grpc_asyncio", @@ -875,6 +899,24 @@ def test_get_instance_config_from_dict(): test_get_instance_config(request_type=dict) +def test_get_instance_config_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_instance_config), "__call__" + ) as call: + client.get_instance_config() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.GetInstanceConfigRequest() + + @pytest.mark.asyncio async def test_get_instance_config_async( transport: str = "grpc_asyncio", @@ -1083,6 +1125,22 @@ def test_list_instances_from_dict(): test_list_instances(request_type=dict) +def test_list_instances_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + client.list_instances() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.ListInstancesRequest() + + @pytest.mark.asyncio async def test_list_instances_async( transport: str = "grpc_asyncio", @@ -1452,6 +1510,22 @@ def test_get_instance_from_dict(): test_get_instance(request_type=dict) +def test_get_instance_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + client.get_instance() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.GetInstanceRequest() + + @pytest.mark.asyncio async def test_get_instance_async( transport: str = "grpc_asyncio", @@ -1658,6 +1732,22 @@ def test_create_instance_from_dict(): test_create_instance(request_type=dict) +def test_create_instance_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + client.create_instance() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.CreateInstanceRequest() + + @pytest.mark.asyncio async def test_create_instance_async( transport: str = "grpc_asyncio", @@ -1867,6 +1957,22 @@ def test_update_instance_from_dict(): test_update_instance(request_type=dict) +def test_update_instance_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_instance), "__call__") as call: + client.update_instance() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.UpdateInstanceRequest() + + @pytest.mark.asyncio async def test_update_instance_async( transport: str = "grpc_asyncio", @@ -2072,6 +2178,22 @@ def test_delete_instance_from_dict(): test_delete_instance(request_type=dict) +def test_delete_instance_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + client.delete_instance() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner_instance_admin.DeleteInstanceRequest() + + @pytest.mark.asyncio async def test_delete_instance_async( transport: str = "grpc_asyncio", @@ -2258,6 +2380,22 @@ def test_set_iam_policy_from_dict(): test_set_iam_policy(request_type=dict) +def test_set_iam_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + client.set_iam_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.SetIamPolicyRequest() + + @pytest.mark.asyncio async def test_set_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy.SetIamPolicyRequest @@ -2465,6 +2603,22 @@ def test_get_iam_policy_from_dict(): test_get_iam_policy(request_type=dict) +def test_get_iam_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + client.get_iam_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.GetIamPolicyRequest() + + @pytest.mark.asyncio async def test_get_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy.GetIamPolicyRequest @@ -2674,6 +2828,24 @@ def test_test_iam_permissions_from_dict(): test_test_iam_permissions(request_type=dict) +def test_test_iam_permissions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = InstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + client.test_iam_permissions() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.TestIamPermissionsRequest() + + @pytest.mark.asyncio async def test_test_iam_permissions_async( transport: str = "grpc_asyncio", request_type=iam_policy.TestIamPermissionsRequest diff --git a/tests/unit/gapic/spanner_v1/__init__.py b/tests/unit/gapic/spanner_v1/__init__.py index 8b13789179..42ffdf2bc4 100644 --- a/tests/unit/gapic/spanner_v1/__init__.py +++ b/tests/unit/gapic/spanner_v1/__init__.py @@ -1 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
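Aside: the `*_empty_call` tests stamped out across these files all share the same five-line body. A hypothetical parametrized helper (not part of this patch; the `method_name`/`request_type` pairs below are illustrative) could express the pattern once per client:

    import pytest
    from unittest import mock
    from google.auth import credentials
    from google.cloud.spanner_v1.services.spanner import SpannerClient
    from google.cloud.spanner_v1.types import spanner

    @pytest.mark.parametrize(
        "method_name,request_type",
        [("get_session", spanner.GetSessionRequest), ("commit", spanner.CommitRequest)],
    )
    def test_empty_call(method_name, request_type):
        client = SpannerClient(
            credentials=credentials.AnonymousCredentials(), transport="grpc",
        )
        # Patch the transport stub, invoke the method with no arguments, and
        # check that the default request message was constructed.
        with mock.patch.object(
            type(getattr(client.transport, method_name)), "__call__"
        ) as call:
            getattr(client, method_name)()
            call.assert_called()
            _, args, _ = call.mock_calls[0]
            assert args[0] == request_type()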
+# diff --git a/tests/unit/gapic/spanner_v1/test_spanner.py b/tests/unit/gapic/spanner_v1/test_spanner.py index 56d3818009..37ca9c6deb 100644 --- a/tests/unit/gapic/spanner_v1/test_spanner.py +++ b/tests/unit/gapic/spanner_v1/test_spanner.py @@ -87,15 +87,17 @@ def test__get_default_mtls_endpoint(): assert SpannerClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi -def test_spanner_client_from_service_account_info(): +@pytest.mark.parametrize("client_class", [SpannerClient, SpannerAsyncClient,]) +def test_spanner_client_from_service_account_info(client_class): creds = credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = SpannerClient.from_service_account_info(info) + client = client_class.from_service_account_info(info) assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "spanner.googleapis.com:443" @@ -109,9 +111,11 @@ def test_spanner_client_from_service_account_file(client_class): factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) client = client_class.from_service_account_json("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "spanner.googleapis.com:443" @@ -448,6 +452,22 @@ def test_create_session_from_dict(): test_create_session(request_type=dict) +def test_create_session_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_session), "__call__") as call: + client.create_session() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.CreateSessionRequest() + + @pytest.mark.asyncio async def test_create_session_async( transport: str = "grpc_asyncio", request_type=spanner.CreateSessionRequest @@ -635,6 +655,24 @@ def test_batch_create_sessions_from_dict(): test_batch_create_sessions(request_type=dict) +def test_batch_create_sessions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_sessions), "__call__" + ) as call: + client.batch_create_sessions() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.BatchCreateSessionsRequest() + + @pytest.mark.asyncio async def test_batch_create_sessions_async( transport: str = "grpc_asyncio", request_type=spanner.BatchCreateSessionsRequest @@ -844,6 +882,22 @@ def test_get_session_from_dict(): test_get_session(request_type=dict) +def test_get_session_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_session), "__call__") as call: + client.get_session() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.GetSessionRequest() + + @pytest.mark.asyncio async def test_get_session_async( transport: str = "grpc_asyncio", request_type=spanner.GetSessionRequest @@ -1033,6 +1087,22 @@ def test_list_sessions_from_dict(): test_list_sessions(request_type=dict) +def test_list_sessions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_sessions), "__call__") as call: + client.list_sessions() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.ListSessionsRequest() + + @pytest.mark.asyncio async def test_list_sessions_async( transport: str = "grpc_asyncio", request_type=spanner.ListSessionsRequest @@ -1343,6 +1413,22 @@ def test_delete_session_from_dict(): test_delete_session(request_type=dict) +def test_delete_session_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_session), "__call__") as call: + client.delete_session() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.DeleteSessionRequest() + + @pytest.mark.asyncio async def test_delete_session_async( transport: str = "grpc_asyncio", request_type=spanner.DeleteSessionRequest @@ -1522,6 +1608,22 @@ def test_execute_sql_from_dict(): test_execute_sql(request_type=dict) +def test_execute_sql_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.execute_sql), "__call__") as call: + client.execute_sql() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.ExecuteSqlRequest() + + @pytest.mark.asyncio async def test_execute_sql_async( transport: str = "grpc_asyncio", request_type=spanner.ExecuteSqlRequest @@ -1644,6 +1746,24 @@ def test_execute_streaming_sql_from_dict(): test_execute_streaming_sql(request_type=dict) +def test_execute_streaming_sql_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.execute_streaming_sql), "__call__" + ) as call: + client.execute_streaming_sql() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.ExecuteSqlRequest() + + @pytest.mark.asyncio async def test_execute_streaming_sql_async( transport: str = "grpc_asyncio", request_type=spanner.ExecuteSqlRequest @@ -1775,6 +1895,24 @@ def test_execute_batch_dml_from_dict(): test_execute_batch_dml(request_type=dict) +def test_execute_batch_dml_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.execute_batch_dml), "__call__" + ) as call: + client.execute_batch_dml() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.ExecuteBatchDmlRequest() + + @pytest.mark.asyncio async def test_execute_batch_dml_async( transport: str = "grpc_asyncio", request_type=spanner.ExecuteBatchDmlRequest @@ -1899,6 +2037,22 @@ def test_read_from_dict(): test_read(request_type=dict) +def test_read_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.read), "__call__") as call: + client.read() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.ReadRequest() + + @pytest.mark.asyncio async def test_read_async( transport: str = "grpc_asyncio", request_type=spanner.ReadRequest @@ -2017,6 +2171,22 @@ def test_streaming_read_from_dict(): test_streaming_read(request_type=dict) +def test_streaming_read_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.streaming_read), "__call__") as call: + client.streaming_read() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.ReadRequest() + + @pytest.mark.asyncio async def test_streaming_read_async( transport: str = "grpc_asyncio", request_type=spanner.ReadRequest @@ -2144,6 +2314,24 @@ def test_begin_transaction_from_dict(): test_begin_transaction(request_type=dict) +def test_begin_transaction_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.begin_transaction), "__call__" + ) as call: + client.begin_transaction() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.BeginTransactionRequest() + + @pytest.mark.asyncio async def test_begin_transaction_async( transport: str = "grpc_asyncio", request_type=spanner.BeginTransactionRequest @@ -2355,6 +2543,22 @@ def test_commit_from_dict(): test_commit(request_type=dict) +def test_commit_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.commit), "__call__") as call: + client.commit() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.CommitRequest() + + @pytest.mark.asyncio async def test_commit_async( transport: str = "grpc_asyncio", request_type=spanner.CommitRequest @@ -2581,6 +2785,22 @@ def test_rollback_from_dict(): test_rollback(request_type=dict) +def test_rollback_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.rollback), "__call__") as call: + client.rollback() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.RollbackRequest() + + @pytest.mark.asyncio async def test_rollback_async( transport: str = "grpc_asyncio", request_type=spanner.RollbackRequest @@ -2774,6 +2994,22 @@ def test_partition_query_from_dict(): test_partition_query(request_type=dict) +def test_partition_query_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.partition_query), "__call__") as call: + client.partition_query() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.PartitionQueryRequest() + + @pytest.mark.asyncio async def test_partition_query_async( transport: str = "grpc_asyncio", request_type=spanner.PartitionQueryRequest @@ -2894,6 +3130,22 @@ def test_partition_read_from_dict(): test_partition_read(request_type=dict) +def test_partition_read_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.partition_read), "__call__") as call: + client.partition_read() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == spanner.PartitionReadRequest() + + @pytest.mark.asyncio async def test_partition_read_async( transport: str = "grpc_asyncio", request_type=spanner.PartitionReadRequest From 849de1d01f7dd5512fe0f4b78b8634d32051749e Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Mon, 29 Mar 2021 03:17:38 -0700 Subject: [PATCH 03/10] feat(spanner): add `optimizer_statistics_package` field in `QueryOptions` PiperOrigin-RevId: 360758638 Source-Author: Google APIs Source-Date: Wed Mar 3 14:32:33 2021 -0800 Source-Repo: googleapis/googleapis Source-Sha: dff6e4625d4ea0a16fc44d3b9be115219c403f07 Source-Link: https://github.com/googleapis/googleapis/commit/dff6e4625d4ea0a16fc44d3b9be115219c403f07 --- google/cloud/spanner_v1/proto/keys.proto | 2 +- .../cloud/spanner_v1/proto/query_plan.proto | 2 +- .../cloud/spanner_v1/proto/result_set.proto | 2 +- google/cloud/spanner_v1/proto/spanner.proto | 41 ++++++++++++--- .../cloud/spanner_v1/proto/transaction.proto | 16 +++--- google/cloud/spanner_v1/proto/type.proto | 2 +- google/cloud/spanner_v1/types/spanner.py | 50 +++++++++++++++---- google/cloud/spanner_v1/types/transaction.py | 14 +++--- synth.metadata | 4 +- 9 files changed, 97 insertions(+), 36 deletions(-) diff --git a/google/cloud/spanner_v1/proto/keys.proto b/google/cloud/spanner_v1/proto/keys.proto index 267df0d102..d8ce0d6774 100644 --- a/google/cloud/spanner_v1/proto/keys.proto +++ b/google/cloud/spanner_v1/proto/keys.proto @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/google/cloud/spanner_v1/proto/query_plan.proto b/google/cloud/spanner_v1/proto/query_plan.proto index 974a70e6d1..35f8fe21c5 100644 --- a/google/cloud/spanner_v1/proto/query_plan.proto +++ b/google/cloud/spanner_v1/proto/query_plan.proto @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/google/cloud/spanner_v1/proto/result_set.proto b/google/cloud/spanner_v1/proto/result_set.proto index a87d741fdc..d6bb9a2831 100644 --- a/google/cloud/spanner_v1/proto/result_set.proto +++ b/google/cloud/spanner_v1/proto/result_set.proto @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/google/cloud/spanner_v1/proto/spanner.proto b/google/cloud/spanner_v1/proto/spanner.proto index 8f579e333d..75d37efd46 100644 --- a/google/cloud/spanner_v1/proto/spanner.proto +++ b/google/cloud/spanner_v1/proto/spanner.proto @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -435,21 +435,50 @@ message ExecuteSqlRequest { // This parameter allows individual queries to pick different query // optimizer versions. // - // Specifying "latest" as a value instructs Cloud Spanner to use the + // Specifying `latest` as a value instructs Cloud Spanner to use the // latest supported query optimizer version. 
If not specified, Cloud Spanner - // uses optimizer version set at the database level options. Any other + // uses the optimizer version set at the database level options. Any other // positive integer (from the list of supported optimizer versions) // overrides the default optimizer version for query execution. + // // The list of supported optimizer versions can be queried from - // SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS. Executing a SQL statement - // with an invalid optimizer version will fail with a syntax error - // (`INVALID_ARGUMENT`) status. + // SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS. + // + // Executing a SQL statement with an invalid optimizer version fails with + // an `INVALID_ARGUMENT` error. + // // See // https://cloud.google.com/spanner/docs/query-optimizer/manage-query-optimizer // for more information on managing the query optimizer. // // The `optimizer_version` statement hint has precedence over this setting. string optimizer_version = 1; + + // Query optimizer statistics package to use. + // + // This parameter allows individual queries to use a different query + // optimizer statistics. + // + // Specifying `latest` as a value instructs Cloud Spanner to use the latest + // generated statistics package. If not specified, Cloud Spanner uses + // statistics package set at the database level options, or latest if + // the database option is not set. + // + // The statistics package requested by the query has to be exempt from + // garbage collection. This can be achieved with the following DDL + // statement: + // + // ``` + // ALTER STATISTICS SET OPTIONS (allow_gc=false) + // ``` + // + // The list of available statistics packages can be queried from + // `SPANNER_SYS.OPTIMIZER_STATISTICS_PACKAGES`. + // + // Executing a SQL statement with an invalid optimizer statistics package + // or with statistics package that allows garbage collection fails with + // an `INVALID_ARGUMENT` error. + string optimizer_statistics_package = 2; } // Mode in which the statement must be processed. diff --git a/google/cloud/spanner_v1/proto/transaction.proto b/google/cloud/spanner_v1/proto/transaction.proto index 2cafefcb10..30ef9dc84a 100644 --- a/google/cloud/spanner_v1/proto/transaction.proto +++ b/google/cloud/spanner_v1/proto/transaction.proto @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -93,7 +93,7 @@ option ruby_package = "Google::Cloud::Spanner::V1"; // [Rollback][google.spanner.v1.Spanner.Rollback] request to abort the // transaction. // -// ### Semantics +// ## Semantics // // Cloud Spanner can commit the transaction if all read locks it acquired // are still valid at commit time, and it is able to acquire write @@ -106,7 +106,7 @@ option ruby_package = "Google::Cloud::Spanner::V1"; // use Cloud Spanner locks for any sort of mutual exclusion other than // between Cloud Spanner transactions themselves. // -// ### Retrying Aborted Transactions +// ## Retrying Aborted Transactions // // When a transaction aborts, the application can choose to retry the // whole transaction again. To maximize the chances of successfully @@ -122,7 +122,7 @@ option ruby_package = "Google::Cloud::Spanner::V1"; // instead, it is better to limit the total amount of wall time spent // retrying. 
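The `optimizer_statistics_package` field added to `QueryOptions` above can be pinned per query from the client. A minimal sketch (the session path and package name are placeholders; real package names come from SPANNER_SYS.OPTIMIZER_STATISTICS_PACKAGES):

    from google.cloud.spanner_v1.types import spanner

    request = spanner.ExecuteSqlRequest(
        session="projects/p/instances/i/databases/d/sessions/s",  # placeholder
        sql="SELECT 1",
        query_options=spanner.ExecuteSqlRequest.QueryOptions(
            optimizer_version="latest",
            # Hypothetical package name; the package must be exempt from
            # garbage collection (see the ALTER STATISTICS DDL above).
            optimizer_statistics_package="auto_20210301_00_00_00UTC",
        ),
    )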
// -// ### Idle Transactions +// ## Idle Transactions // // A transaction is considered idle if it has no outstanding reads or // SQL queries and has not started a read or SQL query within the last 10 @@ -172,7 +172,7 @@ option ruby_package = "Google::Cloud::Spanner::V1"; // // Each type of timestamp bound is discussed in detail below. // -// ### Strong +// ## Strong // // Strong reads are guaranteed to see the effects of all transactions // that have committed before the start of the read. Furthermore, all @@ -188,7 +188,7 @@ option ruby_package = "Google::Cloud::Spanner::V1"; // // See [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong]. // -// ### Exact Staleness +// ## Exact Staleness // // These timestamp bounds execute reads at a user-specified // timestamp. Reads at a timestamp are guaranteed to see a consistent @@ -210,7 +210,7 @@ option ruby_package = "Google::Cloud::Spanner::V1"; // See [TransactionOptions.ReadOnly.read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.read_timestamp] and // [TransactionOptions.ReadOnly.exact_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact_staleness]. // -// ### Bounded Staleness +// ## Bounded Staleness // // Bounded staleness modes allow Cloud Spanner to pick the read timestamp, // subject to a user-provided staleness bound. Cloud Spanner chooses the @@ -240,7 +240,7 @@ option ruby_package = "Google::Cloud::Spanner::V1"; // See [TransactionOptions.ReadOnly.max_staleness][google.spanner.v1.TransactionOptions.ReadOnly.max_staleness] and // [TransactionOptions.ReadOnly.min_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min_read_timestamp]. // -// ### Old Read Timestamps and Garbage Collection +// ## Old Read Timestamps and Garbage Collection // // Cloud Spanner continuously garbage collects deleted and overwritten data // in the background to reclaim storage space. This process is known diff --git a/google/cloud/spanner_v1/proto/type.proto b/google/cloud/spanner_v1/proto/type.proto index 1b863c0fdf..4a5afd485d 100644 --- a/google/cloud/spanner_v1/proto/type.proto +++ b/google/cloud/spanner_v1/proto/type.proto @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/google/cloud/spanner_v1/types/spanner.py b/google/cloud/spanner_v1/types/spanner.py index 1dfd8451fe..5f818b5023 100644 --- a/google/cloud/spanner_v1/types/spanner.py +++ b/google/cloud/spanner_v1/types/spanner.py @@ -353,25 +353,57 @@ class QueryOptions(proto.Message): This parameter allows individual queries to pick different query optimizer versions. - Specifying "latest" as a value instructs Cloud Spanner to + Specifying ``latest`` as a value instructs Cloud Spanner to use the latest supported query optimizer version. If not - specified, Cloud Spanner uses optimizer version set at the - database level options. Any other positive integer (from the - list of supported optimizer versions) overrides the default - optimizer version for query execution. The list of supported - optimizer versions can be queried from - SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS. Executing a SQL - statement with an invalid optimizer version will fail with a - syntax error (``INVALID_ARGUMENT``) status. See + specified, Cloud Spanner uses the optimizer version set at + the database level options. 
Any other positive integer (from + the list of supported optimizer versions) overrides the + default optimizer version for query execution. + + The list of supported optimizer versions can be queried from + SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS. + + Executing a SQL statement with an invalid optimizer version + fails with an ``INVALID_ARGUMENT`` error. + + See https://cloud.google.com/spanner/docs/query-optimizer/manage-query-optimizer for more information on managing the query optimizer. The ``optimizer_version`` statement hint has precedence over this setting. + optimizer_statistics_package (str): + Query optimizer statistics package to use. + + This parameter allows individual queries to use a different + query optimizer statistics. + + Specifying ``latest`` as a value instructs Cloud Spanner to + use the latest generated statistics package. If not + specified, Cloud Spanner uses statistics package set at the + database level options, or latest if the database option is + not set. + + The statistics package requested by the query has to be + exempt from garbage collection. This can be achieved with + the following DDL statement: + + :: + + ALTER STATISTICS SET OPTIONS (allow_gc=false) + + The list of available statistics packages can be queried + from ``SPANNER_SYS.OPTIMIZER_STATISTICS_PACKAGES``. + + Executing a SQL statement with an invalid optimizer + statistics package or with statistics package that allows + garbage collection fails with an ``INVALID_ARGUMENT`` error. """ optimizer_version = proto.Field(proto.STRING, number=1) + optimizer_statistics_package = proto.Field(proto.STRING, number=2) + session = proto.Field(proto.STRING, number=1) transaction = proto.Field( diff --git a/google/cloud/spanner_v1/types/transaction.py b/google/cloud/spanner_v1/types/transaction.py index 744fe2597a..ddda88b384 100644 --- a/google/cloud/spanner_v1/types/transaction.py +++ b/google/cloud/spanner_v1/types/transaction.py @@ -98,7 +98,7 @@ class TransactionOptions(proto.Message): transaction. Semantics - ~~~~~~~~~ + --------- Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire @@ -113,7 +113,7 @@ class TransactionOptions(proto.Message): than between Cloud Spanner transactions themselves. Retrying Aborted Transactions - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + ----------------------------- When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully @@ -130,7 +130,7 @@ class TransactionOptions(proto.Message): retrying. Idle Transactions - ~~~~~~~~~~~~~~~~~ + ----------------- A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last @@ -182,7 +182,7 @@ class TransactionOptions(proto.Message): Each type of timestamp bound is discussed in detail below. Strong - ~~~~~~ + ------ Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all @@ -200,7 +200,7 @@ class TransactionOptions(proto.Message): [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong]. Exact Staleness - ~~~~~~~~~~~~~~~ + --------------- These timestamp bounds execute reads at a user-specified timestamp. 
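A sketch of this mode with the types in this module (the 15-second bound is arbitrary):

    from google.protobuf import duration_pb2
    from google.cloud.spanner_v1.types import transaction

    # Snapshot read-only transaction reading at a timestamp exactly
    # 15 seconds in the past.
    opts = transaction.TransactionOptions(
        read_only=transaction.TransactionOptions.ReadOnly(
            exact_staleness=duration_pb2.Duration(seconds=15),
        ),
    )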
Reads at a timestamp are guaranteed to see a consistent prefix of @@ -225,7 +225,7 @@ class TransactionOptions(proto.Message): [TransactionOptions.ReadOnly.exact_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact_staleness]. Bounded Staleness - ~~~~~~~~~~~~~~~~~ + ----------------- Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner @@ -258,7 +258,7 @@ class TransactionOptions(proto.Message): [TransactionOptions.ReadOnly.min_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min_read_timestamp]. Old Read Timestamps and Garbage Collection - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + ------------------------------------------ Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is diff --git a/synth.metadata b/synth.metadata index fd1a836d0b..177a0c0309 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,8 +11,8 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "07932bb995e7dc91b43620ea8402c6668c7d102c", - "internalRef": "359562873" + "sha": "dff6e4625d4ea0a16fc44d3b9be115219c403f07", + "internalRef": "360758638" } }, { From 80f52dfc1baf0eadaf5000ec3e334cedf341b5f2 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Mon, 29 Mar 2021 03:20:00 -0700 Subject: [PATCH 04/10] chore: upgrade gapic-generator-python to 0.42.2 PiperOrigin-RevId: 361662015 Source-Author: Google APIs Source-Date: Mon Mar 8 14:47:18 2021 -0800 Source-Repo: googleapis/googleapis Source-Sha: 28a591963253d52ce3a25a918cafbdd9928de8cf Source-Link: https://github.com/googleapis/googleapis/commit/28a591963253d52ce3a25a918cafbdd9928de8cf --- .../services/database_admin/client.py | 18 ++-- .../types/__init__.py | 84 +++++++++---------- .../services/instance_admin/client.py | 18 ++-- .../types/__init__.py | 32 +++---- google/cloud/spanner_v1/types/__init__.py | 76 ++++++++--------- synth.metadata | 4 +- 6 files changed, 116 insertions(+), 116 deletions(-) diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/client.py b/google/cloud/spanner_admin_database_v1/services/database_admin/client.py index 45db079111..4dfb39e47b 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/client.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/client.py @@ -1082,12 +1082,12 @@ def set_iam_policy( "the individual field arguments should be set." ) - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. if isinstance(request, dict): + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. request = iam_policy.SetIamPolicyRequest(**request) - elif not request: + # Null request, just make one. request = iam_policy.SetIamPolicyRequest() if resource is not None: @@ -1218,12 +1218,12 @@ def get_iam_policy( "the individual field arguments should be set." ) - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. if isinstance(request, dict): + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. request = iam_policy.GetIamPolicyRequest(**request) - elif not request: + # Null request, just make one. request = iam_policy.GetIamPolicyRequest() if resource is not None: @@ -1309,12 +1309,12 @@ def test_iam_permissions( "the individual field arguments should be set." 
) - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. if isinstance(request, dict): + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. request = iam_policy.TestIamPermissionsRequest(**request) - elif not request: + # Null request, just make one. request = iam_policy.TestIamPermissionsRequest() if resource is not None: diff --git a/google/cloud/spanner_admin_database_v1/types/__init__.py b/google/cloud/spanner_admin_database_v1/types/__init__.py index 9749add377..a1316e789a 100644 --- a/google/cloud/spanner_admin_database_v1/types/__init__.py +++ b/google/cloud/spanner_admin_database_v1/types/__init__.py @@ -15,80 +15,80 @@ # limitations under the License. # -from .common import ( - OperationProgress, - EncryptionConfig, - EncryptionInfo, -) from .backup import ( Backup, - CreateBackupRequest, + BackupInfo, + CreateBackupEncryptionConfig, CreateBackupMetadata, - UpdateBackupRequest, - GetBackupRequest, + CreateBackupRequest, DeleteBackupRequest, - ListBackupsRequest, - ListBackupsResponse, + GetBackupRequest, ListBackupOperationsRequest, ListBackupOperationsResponse, - BackupInfo, - CreateBackupEncryptionConfig, + ListBackupsRequest, + ListBackupsResponse, + UpdateBackupRequest, +) +from .common import ( + EncryptionConfig, + EncryptionInfo, + OperationProgress, ) from .spanner_database_admin import ( - RestoreInfo, - Database, - ListDatabasesRequest, - ListDatabasesResponse, - CreateDatabaseRequest, CreateDatabaseMetadata, - GetDatabaseRequest, - UpdateDatabaseDdlRequest, - UpdateDatabaseDdlMetadata, + CreateDatabaseRequest, + Database, DropDatabaseRequest, GetDatabaseDdlRequest, GetDatabaseDdlResponse, + GetDatabaseRequest, ListDatabaseOperationsRequest, ListDatabaseOperationsResponse, - RestoreDatabaseRequest, + ListDatabasesRequest, + ListDatabasesResponse, + OptimizeRestoredDatabaseMetadata, RestoreDatabaseEncryptionConfig, RestoreDatabaseMetadata, - OptimizeRestoredDatabaseMetadata, + RestoreDatabaseRequest, + RestoreInfo, + UpdateDatabaseDdlMetadata, + UpdateDatabaseDdlRequest, RestoreSourceType, ) __all__ = ( - "OperationProgress", - "EncryptionConfig", - "EncryptionInfo", "Backup", - "CreateBackupRequest", + "BackupInfo", + "CreateBackupEncryptionConfig", "CreateBackupMetadata", - "UpdateBackupRequest", - "GetBackupRequest", + "CreateBackupRequest", "DeleteBackupRequest", - "ListBackupsRequest", - "ListBackupsResponse", + "GetBackupRequest", "ListBackupOperationsRequest", "ListBackupOperationsResponse", - "BackupInfo", - "CreateBackupEncryptionConfig", - "RestoreInfo", - "Database", - "ListDatabasesRequest", - "ListDatabasesResponse", - "CreateDatabaseRequest", + "ListBackupsRequest", + "ListBackupsResponse", + "UpdateBackupRequest", + "EncryptionConfig", + "EncryptionInfo", + "OperationProgress", "CreateDatabaseMetadata", - "GetDatabaseRequest", - "UpdateDatabaseDdlRequest", - "UpdateDatabaseDdlMetadata", + "CreateDatabaseRequest", + "Database", "DropDatabaseRequest", "GetDatabaseDdlRequest", "GetDatabaseDdlResponse", + "GetDatabaseRequest", "ListDatabaseOperationsRequest", "ListDatabaseOperationsResponse", - "RestoreDatabaseRequest", + "ListDatabasesRequest", + "ListDatabasesResponse", + "OptimizeRestoredDatabaseMetadata", "RestoreDatabaseEncryptionConfig", "RestoreDatabaseMetadata", - "OptimizeRestoredDatabaseMetadata", + "RestoreDatabaseRequest", + "RestoreInfo", + "UpdateDatabaseDdlMetadata", + "UpdateDatabaseDdlRequest", "RestoreSourceType", ) diff --git 
a/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py index 0985044bc2..99cad77f03 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py @@ -1179,12 +1179,12 @@ def set_iam_policy( "the individual field arguments should be set." ) - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. if isinstance(request, dict): + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. request = iam_policy.SetIamPolicyRequest(**request) - elif not request: + # Null request, just make one. request = iam_policy.SetIamPolicyRequest() if resource is not None: @@ -1311,12 +1311,12 @@ def get_iam_policy( "the individual field arguments should be set." ) - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. if isinstance(request, dict): + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. request = iam_policy.GetIamPolicyRequest(**request) - elif not request: + # Null request, just make one. request = iam_policy.GetIamPolicyRequest() if resource is not None: @@ -1399,12 +1399,12 @@ def test_iam_permissions( "the individual field arguments should be set." ) - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. if isinstance(request, dict): + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. request = iam_policy.TestIamPermissionsRequest(**request) - elif not request: + # Null request, just make one. 
request = iam_policy.TestIamPermissionsRequest() if resource is not None: diff --git a/google/cloud/spanner_admin_instance_v1/types/__init__.py b/google/cloud/spanner_admin_instance_v1/types/__init__.py index 37b771feed..f5ebcd7d5c 100644 --- a/google/cloud/spanner_admin_instance_v1/types/__init__.py +++ b/google/cloud/spanner_admin_instance_v1/types/__init__.py @@ -16,35 +16,35 @@ # from .spanner_instance_admin import ( - ReplicaInfo, - InstanceConfig, + CreateInstanceMetadata, + CreateInstanceRequest, + DeleteInstanceRequest, + GetInstanceConfigRequest, + GetInstanceRequest, Instance, + InstanceConfig, ListInstanceConfigsRequest, ListInstanceConfigsResponse, - GetInstanceConfigRequest, - GetInstanceRequest, - CreateInstanceRequest, ListInstancesRequest, ListInstancesResponse, - UpdateInstanceRequest, - DeleteInstanceRequest, - CreateInstanceMetadata, + ReplicaInfo, UpdateInstanceMetadata, + UpdateInstanceRequest, ) __all__ = ( - "ReplicaInfo", - "InstanceConfig", + "CreateInstanceMetadata", + "CreateInstanceRequest", + "DeleteInstanceRequest", + "GetInstanceConfigRequest", + "GetInstanceRequest", "Instance", + "InstanceConfig", "ListInstanceConfigsRequest", "ListInstanceConfigsResponse", - "GetInstanceConfigRequest", - "GetInstanceRequest", - "CreateInstanceRequest", "ListInstancesRequest", "ListInstancesResponse", - "UpdateInstanceRequest", - "DeleteInstanceRequest", - "CreateInstanceMetadata", + "ReplicaInfo", "UpdateInstanceMetadata", + "UpdateInstanceRequest", ) diff --git a/google/cloud/spanner_v1/types/__init__.py b/google/cloud/spanner_v1/types/__init__.py index a71a15855c..f3ce88c3cf 100644 --- a/google/cloud/spanner_v1/types/__init__.py +++ b/google/cloud/spanner_v1/types/__init__.py @@ -24,44 +24,44 @@ PlanNode, QueryPlan, ) -from .transaction import ( - TransactionOptions, - Transaction, - TransactionSelector, -) -from .type import ( - Type, - StructType, - TypeCode, -) from .result_set import ( - ResultSet, PartialResultSet, + ResultSet, ResultSetMetadata, ResultSetStats, ) from .spanner import ( - CreateSessionRequest, BatchCreateSessionsRequest, BatchCreateSessionsResponse, - Session, - GetSessionRequest, - ListSessionsRequest, - ListSessionsResponse, + BeginTransactionRequest, + CommitRequest, + CommitResponse, + CreateSessionRequest, DeleteSessionRequest, - ExecuteSqlRequest, ExecuteBatchDmlRequest, ExecuteBatchDmlResponse, + ExecuteSqlRequest, + GetSessionRequest, + ListSessionsRequest, + ListSessionsResponse, + Partition, PartitionOptions, PartitionQueryRequest, PartitionReadRequest, - Partition, PartitionResponse, ReadRequest, - BeginTransactionRequest, - CommitRequest, - CommitResponse, RollbackRequest, + Session, +) +from .transaction import ( + Transaction, + TransactionOptions, + TransactionSelector, +) +from .type import ( + StructType, + Type, + TypeCode, ) __all__ = ( @@ -70,35 +70,35 @@ "Mutation", "PlanNode", "QueryPlan", - "TransactionOptions", - "Transaction", - "TransactionSelector", - "Type", - "StructType", - "TypeCode", - "ResultSet", "PartialResultSet", + "ResultSet", "ResultSetMetadata", "ResultSetStats", - "CreateSessionRequest", "BatchCreateSessionsRequest", "BatchCreateSessionsResponse", - "Session", - "GetSessionRequest", - "ListSessionsRequest", - "ListSessionsResponse", + "BeginTransactionRequest", + "CommitRequest", + "CommitResponse", + "CreateSessionRequest", "DeleteSessionRequest", - "ExecuteSqlRequest", "ExecuteBatchDmlRequest", "ExecuteBatchDmlResponse", + "ExecuteSqlRequest", + "GetSessionRequest", + "ListSessionsRequest", + 
"ListSessionsResponse", + "Partition", "PartitionOptions", "PartitionQueryRequest", "PartitionReadRequest", - "Partition", "PartitionResponse", "ReadRequest", - "BeginTransactionRequest", - "CommitRequest", - "CommitResponse", "RollbackRequest", + "Session", + "Transaction", + "TransactionOptions", + "TransactionSelector", + "StructType", + "Type", + "TypeCode", ) diff --git a/synth.metadata b/synth.metadata index 177a0c0309..113ee6ffa9 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,8 +11,8 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "dff6e4625d4ea0a16fc44d3b9be115219c403f07", - "internalRef": "360758638" + "sha": "28a591963253d52ce3a25a918cafbdd9928de8cf", + "internalRef": "361662015" } }, { From 91fa7709f4afea64b05281907b01c7bcd25c1cb7 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Mon, 29 Mar 2021 03:24:10 -0700 Subject: [PATCH 05/10] chore: upgrade gapic-generator-python to 0.43.1 PiperOrigin-RevId: 364411656 Source-Author: Google APIs Source-Date: Mon Mar 22 14:40:22 2021 -0700 Source-Repo: googleapis/googleapis Source-Sha: 149a3a84c29c9b8189576c7442ccb6dcf6a8f95b Source-Link: https://github.com/googleapis/googleapis/commit/149a3a84c29c9b8189576c7442ccb6dcf6a8f95b --- .../services/database_admin/async_client.py | 12 ++ .../database_admin/transports/base.py | 30 +++-- .../database_admin/transports/grpc.py | 103 ++++++---------- .../database_admin/transports/grpc_asyncio.py | 111 +++++++----------- .../services/instance_admin/async_client.py | 6 + .../instance_admin/transports/base.py | 24 ++-- .../instance_admin/transports/grpc.py | 103 ++++++---------- .../instance_admin/transports/grpc_asyncio.py | 111 +++++++----------- .../services/spanner/async_client.py | 13 ++ .../services/spanner/transports/base.py | 31 +++-- .../services/spanner/transports/grpc.py | 101 ++++++---------- .../spanner/transports/grpc_asyncio.py | 109 +++++++---------- synth.metadata | 4 +- 13 files changed, 329 insertions(+), 429 deletions(-) diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py b/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py index bcc9da15e3..e40e0b1960 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py @@ -264,6 +264,7 @@ async def list_databases( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -459,6 +460,7 @@ async def get_database( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -587,6 +589,7 @@ async def update_database_ddl( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -670,6 +673,7 @@ async def drop_database( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -754,6 +758,7 @@ async def get_database_ddl( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -1035,6 +1040,7 
@@ async def get_iam_policy( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=30.0, ), default_timeout=30.0, client_info=DEFAULT_CLIENT_INFO, @@ -1330,6 +1336,7 @@ async def get_backup( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -1430,6 +1437,7 @@ async def update_backup( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -1509,6 +1517,7 @@ async def delete_backup( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -1594,6 +1603,7 @@ async def list_backups( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -1823,6 +1833,7 @@ async def list_database_operations( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -1925,6 +1936,7 @@ async def list_backup_operations( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py index 779f02e840..0e9a7e50c7 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py @@ -79,10 +79,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -90,6 +90,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: @@ -99,20 +102,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. 
- self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { @@ -125,6 +125,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -141,6 +142,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -154,6 +156,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -167,6 +170,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -180,6 +184,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -196,6 +201,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=30.0, ), default_timeout=30.0, client_info=client_info, @@ -217,6 +223,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -230,6 +237,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -243,6 +251,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -256,6 +265,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -272,6 +282,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -285,6 +296,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py index 665ed4fc15..b695a5a113 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py @@ -118,7 +118,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. 
""" + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -126,70 +129,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -197,18 +180,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. 
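The restructured constructor above supports two channel paths; a usage sketch, assuming the generated class name ``DatabaseAdminGrpcTransport`` and, for the first path, that application default credentials are available at runtime:

    import grpc
    from google.cloud.spanner_admin_database_v1.services.database_admin.transports.grpc import (
        DatabaseAdminGrpcTransport,
    )

    # Path 1: the base class resolves host, credentials and scopes first,
    # then the transport builds its own secure channel.
    transport = DatabaseAdminGrpcTransport(host="spanner.googleapis.com")

    # Path 2: a caller-supplied channel wins and credential arguments are
    # ignored, e.g. when pointing at a local emulator.
    emulator_channel = grpc.insecure_channel("localhost:9010")
    transport = DatabaseAdminGrpcTransport(channel=emulator_channel)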
- super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -222,7 +195,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py index 25229d58cd..cac4b1e2b6 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py @@ -73,7 +73,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -151,10 +151,10 @@ def __init__( ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -163,7 +163,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -171,70 +174,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -242,18 +225,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py index 2a2ac56db3..f2a9c36243 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py @@ -268,6 +268,7 @@ async def list_instance_configs( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -359,6 +360,7 @@ async def get_instance_config( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -444,6 +446,7 @@ async def list_instances( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -532,6 +535,7 @@ async def get_instance( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -911,6 +915,7 @@ async def delete_instance( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -1183,6 +1188,7 @@ async def get_iam_policy( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=30.0, ), default_timeout=30.0, client_info=DEFAULT_CLIENT_INFO, diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/base.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/base.py index fa07b95eeb..e3b368c82a 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/base.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/base.py @@ -77,10 +77,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -88,6 +88,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. 
if credentials and credentials_file: @@ -97,20 +100,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { @@ -123,6 +123,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -136,6 +137,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -149,6 +151,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -162,6 +165,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -181,6 +185,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -197,6 +202,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=30.0, ), default_timeout=30.0, client_info=client_info, diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py index e896249468..a3e3f39762 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py @@ -131,7 +131,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -139,70 +142,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. 
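Functionally, the repeated base-transport hunks reduce to this resolution order (a sketch using google-auth's public helpers; ``DEFAULT_SCOPES`` stands in for the service's ``AUTH_SCOPES``):

    from google import auth

    DEFAULT_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)

    def resolve_credentials(credentials=None, credentials_file=None,
                            scopes=None, quota_project_id=None):
        # Scopes are now resolved once, up front, and passed to both
        # credential loaders; previously the raw ``scopes`` argument was
        # forwarded, losing the AUTH_SCOPES fallback.
        scopes = scopes or DEFAULT_SCOPES
        if credentials and credentials_file:
            # The generated code raises DuplicateCredentialArgs here.
            raise ValueError("pass credentials or credentials_file, not both")
        if credentials_file is not None:
            credentials, _ = auth.load_credentials_from_file(
                credentials_file, scopes=scopes, quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = auth.default(
                scopes=scopes, quota_project_id=quota_project_id
            )
        return credentials, scopes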
self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -210,18 +193,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -235,7 +208,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. 
credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py index ca7f009071..e4a860874e 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py @@ -86,7 +86,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -164,10 +164,10 @@ def __init__( ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -176,7 +176,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -184,70 +187,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. 
- self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -255,18 +238,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/spanner_v1/services/spanner/async_client.py b/google/cloud/spanner_v1/services/spanner/async_client.py index becb983ed7..d220c20f6e 100644 --- a/google/cloud/spanner_v1/services/spanner/async_client.py +++ b/google/cloud/spanner_v1/services/spanner/async_client.py @@ -250,6 +250,7 @@ async def create_session( maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=DEFAULT_CLIENT_INFO, @@ -347,6 +348,7 @@ async def batch_create_sessions( maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, @@ -426,6 +428,7 @@ async def get_session( maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=DEFAULT_CLIENT_INFO, @@ -508,6 +511,7 @@ async def list_sessions( maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -589,6 +593,7 @@ async def delete_session( maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=DEFAULT_CLIENT_INFO, @@ -659,6 +664,7 @@ async def execute_sql( maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=DEFAULT_CLIENT_INFO, @@ -826,6 +832,7 @@ async def execute_batch_dml( maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=DEFAULT_CLIENT_INFO, @@ -898,6 +905,7 @@ async def read( maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=DEFAULT_CLIENT_INFO, @@ -1049,6 +1057,7 @@ async def begin_transaction( maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=DEFAULT_CLIENT_INFO, @@ -1186,6 +1195,7 @@ async def commit( maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=3600.0, ), default_timeout=3600.0, client_info=DEFAULT_CLIENT_INFO, @@ -1278,6 +1288,7 @@ async def rollback( maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=DEFAULT_CLIENT_INFO, @@ -1348,6 +1359,7 @@ async def partition_query( maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=DEFAULT_CLIENT_INFO, @@ -1422,6 +1434,7 @@ async def partition_read( maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=DEFAULT_CLIENT_INFO, diff --git a/google/cloud/spanner_v1/services/spanner/transports/base.py b/google/cloud/spanner_v1/services/spanner/transports/base.py index 36e3c0cb52..f91b98d6fb 100644 --- a/google/cloud/spanner_v1/services/spanner/transports/base.py +++ 
b/google/cloud/spanner_v1/services/spanner/transports/base.py @@ -73,10 +73,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -84,6 +84,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: @@ -93,20 +96,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { @@ -117,6 +117,7 @@ def _prep_wrapped_messages(self, client_info): maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=client_info, @@ -128,6 +129,7 @@ def _prep_wrapped_messages(self, client_info): maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=60.0, ), default_timeout=60.0, client_info=client_info, @@ -139,6 +141,7 @@ def _prep_wrapped_messages(self, client_info): maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=client_info, @@ -150,6 +153,7 @@ def _prep_wrapped_messages(self, client_info): maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -161,6 +165,7 @@ def _prep_wrapped_messages(self, client_info): maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=client_info, @@ -172,6 +177,7 @@ def _prep_wrapped_messages(self, client_info): maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=client_info, @@ -188,6 +194,7 @@ def _prep_wrapped_messages(self, client_info): maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=client_info, @@ -199,6 +206,7 @@ def _prep_wrapped_messages(self, client_info): maximum=32.0, multiplier=1.3, 
predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=client_info, @@ -213,6 +221,7 @@ def _prep_wrapped_messages(self, client_info): maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=client_info, @@ -224,6 +233,7 @@ def _prep_wrapped_messages(self, client_info): maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=3600.0, ), default_timeout=3600.0, client_info=client_info, @@ -235,6 +245,7 @@ def _prep_wrapped_messages(self, client_info): maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=client_info, @@ -246,6 +257,7 @@ def _prep_wrapped_messages(self, client_info): maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=client_info, @@ -257,6 +269,7 @@ def _prep_wrapped_messages(self, client_info): maximum=32.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=30.0, ), default_timeout=30.0, client_info=client_info, diff --git a/google/cloud/spanner_v1/services/spanner/transports/grpc.py b/google/cloud/spanner_v1/services/spanner/transports/grpc.py index 2ac10fc5b3..0a3ead94e5 100644 --- a/google/cloud/spanner_v1/services/spanner/transports/grpc.py +++ b/google/cloud/spanner_v1/services/spanner/transports/grpc.py @@ -111,7 +111,9 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -119,70 +121,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. 
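Each entry in ``_prep_wrapped_messages`` follows one shape; roughly, assuming google-api-core's ``gapic_v1.method`` helper (note the Spanner data-plane methods retry only ``ServiceUnavailable``, per the hunks above):

    from google.api_core import exceptions, gapic_v1
    from google.api_core import retry as retries

    def make_wrapped(func, client_info, timeout=3600.0):
        # Sketch of the per-RPC wrapping: a default retry whose total
        # deadline matches the default timeout; callers can still
        # override both per call.
        return gapic_v1.method.wrap_method(
            func,
            default_retry=retries.Retry(
                initial=1.0, maximum=32.0, multiplier=1.3,
                predicate=retries.if_exception_type(
                    exceptions.ServiceUnavailable,
                ),
                deadline=timeout,
            ),
            default_timeout=timeout,
            client_info=client_info,
        )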
- self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -190,17 +172,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -214,7 +187,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If diff --git a/google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py b/google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py index 265f4bb30a..a7c83ef512 100644 --- a/google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py +++ b/google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py @@ -66,7 +66,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. 
credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -144,10 +144,10 @@ def __init__( ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -156,7 +156,9 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -164,70 +166,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -235,17 +217,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/synth.metadata b/synth.metadata index 113ee6ffa9..0e8a726321 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,8 +11,8 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "28a591963253d52ce3a25a918cafbdd9928de8cf", - "internalRef": "361662015" + "sha": "149a3a84c29c9b8189576c7442ccb6dcf6a8f95b", + "internalRef": "364411656" } }, { From cdd1933746a5a2edda1f5e83879152fd75c72d9a Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Mon, 29 Mar 2021 03:24:51 -0700 Subject: [PATCH 06/10] feat: add RPC Priority request options PiperOrigin-RevId: 364449524 Source-Author: Google APIs Source-Date: Mon Mar 22 17:39:37 2021 -0700 Source-Repo: googleapis/googleapis Source-Sha: 6598bb829c9e9a534be674649ffd1b4671a821f9 Source-Link: https://github.com/googleapis/googleapis/commit/6598bb829c9e9a534be674649ffd1b4671a821f9 --- google/cloud/spanner_v1/proto/spanner.proto | 55 +++++++++++++++++++ google/cloud/spanner_v1/types/__init__.py | 2 + google/cloud/spanner_v1/types/spanner.py | 60 +++++++++++++++++++++ scripts/fixup_spanner_v1_keywords.py | 14 ++--- synth.metadata | 4 +- 5 files changed, 126 insertions(+), 9 deletions(-) diff --git a/google/cloud/spanner_v1/proto/spanner.proto b/google/cloud/spanner_v1/proto/spanner.proto index 75d37efd46..b4877cf919 100644 --- a/google/cloud/spanner_v1/proto/spanner.proto +++ b/google/cloud/spanner_v1/proto/spanner.proto @@ -425,6 +425,42 @@ message DeleteSessionRequest { ]; } +// Common request options for various APIs. 
+message RequestOptions { + // The relative priority for requests. Note that priority is not applicable + // for [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction]. + // + // The priority acts as a hint to the Cloud Spanner scheduler and does not + // guarantee priority or order of execution. For example: + // + // * Some parts of a write operation always execute at `PRIORITY_HIGH`, + // regardless of the specified priority. This may cause you to see an + // increase in high priority workload even when executing a low priority + // request. This can also potentially cause a priority inversion where a + // lower priority request will be fulfilled ahead of a higher priority + // request. + // * If a transaction contains multiple operations with different priorities, + // Cloud Spanner does not guarantee to process the higher priority + // operations first. There may be other constraints to satisfy, such as + // order of operations. + enum Priority { + // `PRIORITY_UNSPECIFIED` is equivalent to `PRIORITY_HIGH`. + PRIORITY_UNSPECIFIED = 0; + + // This specifies that the request is low priority. + PRIORITY_LOW = 1; + + // This specifies that the request is medium priority. + PRIORITY_MEDIUM = 2; + + // This specifies that the request is high priority. + PRIORITY_HIGH = 3; + } + + // Priority for the request. + Priority priority = 1; +} + // The request for [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and // [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]. message ExecuteSqlRequest { @@ -576,6 +612,9 @@ message ExecuteSqlRequest { // Query optimizer configuration to use for the given query. QueryOptions query_options = 10; + + // Common options for this request. + RequestOptions request_options = 11; } // The request for [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. @@ -642,6 +681,9 @@ message ExecuteBatchDmlRequest { // sequence number, the transaction may be aborted. Replays of previously // handled requests will yield the same response as the first execution. int64 seqno = 4 [(google.api.field_behavior) = REQUIRED]; + + // Common options for this request. + RequestOptions request_options = 5; } // The response for [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. Contains a list @@ -876,6 +918,9 @@ message ReadRequest { // match for the values of fields common to this message and the // PartitionReadRequest message used to create this partition_token. bytes partition_token = 10; + + // Common options for this request. + RequestOptions request_options = 11; } // The request for [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction]. @@ -890,6 +935,13 @@ message BeginTransactionRequest { // Required. Options for the new transaction. TransactionOptions options = 2 [(google.api.field_behavior) = REQUIRED]; + + // Common options for this request. + // Priority is ignored for this request. Setting the priority in this + // request_options struct will not do anything. To set the priority for a + // transaction, set it on the reads and writes that are part of this + // transaction instead. + RequestOptions request_options = 3; } // The request for [Commit][google.spanner.v1.Spanner.Commit]. @@ -928,6 +980,9 @@ message CommitRequest { // the [CommitResponse][google.spanner.v1.CommitResponse.commit_stats]. Default value is // `false`. bool return_commit_stats = 5; + + // Common options for this request. + RequestOptions request_options = 6; } // The response for [Commit][google.spanner.v1.Spanner.Commit]. 
diff --git a/google/cloud/spanner_v1/types/__init__.py b/google/cloud/spanner_v1/types/__init__.py index f3ce88c3cf..7a7ac395e4 100644 --- a/google/cloud/spanner_v1/types/__init__.py +++ b/google/cloud/spanner_v1/types/__init__.py @@ -50,6 +50,7 @@ PartitionReadRequest, PartitionResponse, ReadRequest, + RequestOptions, RollbackRequest, Session, ) @@ -93,6 +94,7 @@ "PartitionReadRequest", "PartitionResponse", "ReadRequest", + "RequestOptions", "RollbackRequest", "Session", "Transaction", diff --git a/google/cloud/spanner_v1/types/spanner.py b/google/cloud/spanner_v1/types/spanner.py index 5f818b5023..5ced795ef4 100644 --- a/google/cloud/spanner_v1/types/spanner.py +++ b/google/cloud/spanner_v1/types/spanner.py @@ -39,6 +39,7 @@ "ListSessionsRequest", "ListSessionsResponse", "DeleteSessionRequest", + "RequestOptions", "ExecuteSqlRequest", "ExecuteBatchDmlRequest", "ExecuteBatchDmlResponse", @@ -240,6 +241,41 @@ class DeleteSessionRequest(proto.Message): name = proto.Field(proto.STRING, number=1) +class RequestOptions(proto.Message): + r"""Common request options for various APIs. + + Attributes: + priority (google.cloud.spanner_v1.types.RequestOptions.Priority): + Priority for the request. + """ + + class Priority(proto.Enum): + r"""The relative priority for requests. Note that priority is not + applicable for + [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction]. + + The priority acts as a hint to the Cloud Spanner scheduler and does + not guarantee priority or order of execution. For example: + + - Some parts of a write operation always execute at + ``PRIORITY_HIGH``, regardless of the specified priority. This may + cause you to see an increase in high priority workload even when + executing a low priority request. This can also potentially cause + a priority inversion where a lower priority request will be + fulfilled ahead of a higher priority request. + - If a transaction contains multiple operations with different + priorities, Cloud Spanner does not guarantee to process the + higher priority operations first. There may be other constraints + to satisfy, such as order of operations. + """ + PRIORITY_UNSPECIFIED = 0 + PRIORITY_LOW = 1 + PRIORITY_MEDIUM = 2 + PRIORITY_HIGH = 3 + + priority = proto.Field(proto.ENUM, number=1, enum=Priority,) + + class ExecuteSqlRequest(proto.Message): r"""The request for [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and @@ -335,6 +371,8 @@ class ExecuteSqlRequest(proto.Message): query_options (google.cloud.spanner_v1.types.ExecuteSqlRequest.QueryOptions): Query optimizer configuration to use for the given query. + request_options (google.cloud.spanner_v1.types.RequestOptions): + Common options for this request. """ class QueryMode(proto.Enum): @@ -428,6 +466,8 @@ class QueryOptions(proto.Message): query_options = proto.Field(proto.MESSAGE, number=10, message=QueryOptions,) + request_options = proto.Field(proto.MESSAGE, number=11, message="RequestOptions",) + class ExecuteBatchDmlRequest(proto.Message): r"""The request for @@ -466,6 +506,8 @@ class ExecuteBatchDmlRequest(proto.Message): sequence number, the transaction may be aborted. Replays of previously handled requests will yield the same response as the first execution. + request_options (google.cloud.spanner_v1.types.RequestOptions): + Common options for this request. 
""" class Statement(proto.Message): @@ -523,6 +565,8 @@ class Statement(proto.Message): seqno = proto.Field(proto.INT64, number=4) + request_options = proto.Field(proto.MESSAGE, number=5, message="RequestOptions",) + class ExecuteBatchDmlResponse(proto.Message): r"""The response for @@ -867,6 +911,8 @@ class ReadRequest(proto.Message): must be an exact match for the values of fields common to this message and the PartitionReadRequest message used to create this partition_token. + request_options (google.cloud.spanner_v1.types.RequestOptions): + Common options for this request. """ session = proto.Field(proto.STRING, number=1) @@ -889,6 +935,8 @@ class ReadRequest(proto.Message): partition_token = proto.Field(proto.BYTES, number=10) + request_options = proto.Field(proto.MESSAGE, number=11, message="RequestOptions",) + class BeginTransactionRequest(proto.Message): r"""The request for @@ -900,6 +948,12 @@ class BeginTransactionRequest(proto.Message): transaction runs. options (google.cloud.spanner_v1.types.TransactionOptions): Required. Options for the new transaction. + request_options (google.cloud.spanner_v1.types.RequestOptions): + Common options for this request. Priority is ignored for + this request. Setting the priority in this request_options + struct will not do anything. To set the priority for a + transaction, set it on the reads and writes that are part of + this transaction instead. """ session = proto.Field(proto.STRING, number=1) @@ -908,6 +962,8 @@ class BeginTransactionRequest(proto.Message): proto.MESSAGE, number=2, message=gs_transaction.TransactionOptions, ) + request_options = proto.Field(proto.MESSAGE, number=3, message="RequestOptions",) + class CommitRequest(proto.Message): r"""The request for [Commit][google.spanner.v1.Spanner.Commit]. @@ -938,6 +994,8 @@ class CommitRequest(proto.Message): be included in the [CommitResponse][google.spanner.v1.CommitResponse.commit_stats]. Default value is ``false``. + request_options (google.cloud.spanner_v1.types.RequestOptions): + Common options for this request. """ session = proto.Field(proto.STRING, number=1) @@ -955,6 +1013,8 @@ class CommitRequest(proto.Message): return_commit_stats = proto.Field(proto.BOOL, number=5) + request_options = proto.Field(proto.MESSAGE, number=6, message="RequestOptions",) + class CommitResponse(proto.Message): r"""The response for [Commit][google.spanner.v1.Spanner.Commit]. 
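In Python terms, the new field is populated like this (a usage sketch; the resource name is a placeholder). As the comments above note, priority on ``BeginTransactionRequest`` is ignored, so it belongs on the commit and on the reads and writes instead:

    from google.cloud.spanner_v1 import types

    commit_request = types.CommitRequest(
        session="projects/my-project/instances/my-instance"
                "/databases/my-database/sessions/my-session",
        single_use_transaction=types.TransactionOptions(
            read_write=types.TransactionOptions.ReadWrite(),
        ),
        mutations=[],  # placeholder; a real commit carries mutations
        request_options=types.RequestOptions(
            priority=types.RequestOptions.Priority.PRIORITY_MEDIUM,
        ),
    )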
diff --git a/scripts/fixup_spanner_v1_keywords.py b/scripts/fixup_spanner_v1_keywords.py index 19e3c0185b..4faf734dcb 100644 --- a/scripts/fixup_spanner_v1_keywords.py +++ b/scripts/fixup_spanner_v1_keywords.py @@ -42,20 +42,20 @@ class spannerCallTransformer(cst.CSTTransformer): CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { 'batch_create_sessions': ('database', 'session_count', 'session_template', ), - 'begin_transaction': ('session', 'options', ), - 'commit': ('session', 'transaction_id', 'single_use_transaction', 'mutations', 'return_commit_stats', ), + 'begin_transaction': ('session', 'options', 'request_options', ), + 'commit': ('session', 'transaction_id', 'single_use_transaction', 'mutations', 'return_commit_stats', 'request_options', ), 'create_session': ('database', 'session', ), 'delete_session': ('name', ), - 'execute_batch_dml': ('session', 'transaction', 'statements', 'seqno', ), - 'execute_sql': ('session', 'sql', 'transaction', 'params', 'param_types', 'resume_token', 'query_mode', 'partition_token', 'seqno', 'query_options', ), - 'execute_streaming_sql': ('session', 'sql', 'transaction', 'params', 'param_types', 'resume_token', 'query_mode', 'partition_token', 'seqno', 'query_options', ), + 'execute_batch_dml': ('session', 'transaction', 'statements', 'seqno', 'request_options', ), + 'execute_sql': ('session', 'sql', 'transaction', 'params', 'param_types', 'resume_token', 'query_mode', 'partition_token', 'seqno', 'query_options', 'request_options', ), + 'execute_streaming_sql': ('session', 'sql', 'transaction', 'params', 'param_types', 'resume_token', 'query_mode', 'partition_token', 'seqno', 'query_options', 'request_options', ), 'get_session': ('name', ), 'list_sessions': ('database', 'page_size', 'page_token', 'filter', ), 'partition_query': ('session', 'sql', 'transaction', 'params', 'param_types', 'partition_options', ), 'partition_read': ('session', 'table', 'key_set', 'transaction', 'index', 'columns', 'partition_options', ), - 'read': ('session', 'table', 'columns', 'key_set', 'transaction', 'index', 'limit', 'resume_token', 'partition_token', ), + 'read': ('session', 'table', 'columns', 'key_set', 'transaction', 'index', 'limit', 'resume_token', 'partition_token', 'request_options', ), 'rollback': ('session', 'transaction_id', ), - 'streaming_read': ('session', 'table', 'columns', 'key_set', 'transaction', 'index', 'limit', 'resume_token', 'partition_token', ), + 'streaming_read': ('session', 'table', 'columns', 'key_set', 'transaction', 'index', 'limit', 'resume_token', 'partition_token', 'request_options', ), } diff --git a/synth.metadata b/synth.metadata index 0e8a726321..4a63d02d2d 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,8 +11,8 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "149a3a84c29c9b8189576c7442ccb6dcf6a8f95b", - "internalRef": "364411656" + "sha": "6598bb829c9e9a534be674649ffd1b4671a821f9", + "internalRef": "364449524" } }, { From 9aa7cb8679833bf32886113bb24112fbc1de7b66 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Mon, 29 Mar 2021 03:27:00 -0700 Subject: [PATCH 07/10] feat: add tagging request options PiperOrigin-RevId: 365498709 Source-Author: Google APIs Source-Date: Sun Mar 28 20:54:25 2021 -0700 Source-Repo: googleapis/googleapis Source-Sha: 6ce40ff8faf68226782f507ca6b2d497a77044de Source-Link: https://github.com/googleapis/googleapis/commit/6ce40ff8faf68226782f507ca6b2d497a77044de --- 
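The diff below adds `request_tag` and `transaction_tag` to `RequestOptions`. A hedged sketch of how the new fields compose: the session name and tag values here are illustrative only, chosen to satisfy the identifier formats quoted in the proto comments.

```py
from google.cloud.spanner_v1.types import (
    CommitRequest,
    ExecuteSqlRequest,
    RequestOptions,
)

session = "projects/p/instances/i/databases/d/sessions/s"  # placeholder

# A per-request tag on the query, plus a transaction_tag that should be the
# same for every request belonging to the transaction.
sql_request = ExecuteSqlRequest(
    session=session,
    sql="SELECT 1",
    request_options=RequestOptions(
        request_tag="list_orders",
        transaction_tag="checkout_flow",
    ),
)

# Commit carries the shared transaction_tag; request_tag would be ignored
# here because it is not applicable to CommitRequest.
commit_request = CommitRequest(
    session=session,
    transaction_id=b"placeholder-txn-id",  # placeholder bytes
    request_options=RequestOptions(transaction_tag="checkout_flow"),
)
```

Tags exist for statistics collection only; they do not change execution, and a tag set where it is not applicable is silently ignored.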
.../proto/spanner_database_admin.proto | 2 +- .../types/spanner_database_admin.py | 2 +- .../proto/spanner_instance_admin.proto | 4 +- google/cloud/spanner_v1/proto/spanner.proto | 33 ++++++++++++++--- google/cloud/spanner_v1/types/spanner.py | 37 +++++++++++++++---- synth.metadata | 4 +- 6 files changed, 63 insertions(+), 19 deletions(-) diff --git a/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto b/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto index ac771bc061..f09cf073b2 100644 --- a/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto +++ b/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto @@ -736,7 +736,7 @@ message RestoreDatabaseRequest { // to. If this field is not specified, the restored database will use the same // encryption configuration as the backup by default, namely // [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] - // = `USE_CONFIG_DEFAULT_OR_DATABASE_ENCRYPTION`. + // = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`. RestoreDatabaseEncryptionConfig encryption_config = 4 [(google.api.field_behavior) = OPTIONAL]; } diff --git a/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py b/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py index c7309dbbde..278d5e6b95 100644 --- a/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py +++ b/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py @@ -540,7 +540,7 @@ class RestoreDatabaseRequest(proto.Message): not specified, the restored database will use the same encryption configuration as the backup by default, namely [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] - = ``USE_CONFIG_DEFAULT_OR_DATABASE_ENCRYPTION``. + = ``USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION``. """ parent = proto.Field(proto.STRING, number=1) diff --git a/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin.proto b/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin.proto index 54767bf263..69043c1b37 100644 --- a/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin.proto +++ b/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin.proto @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -373,7 +373,7 @@ message Instance { // either omitted or set to `CREATING`. For // [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance], the state must be // either omitted or set to `READY`. - State state = 6; + State state = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; // Cloud Labels are a flexible and lightweight mechanism for organizing cloud // resources into groups that reflect a customer's organizational needs and diff --git a/google/cloud/spanner_v1/proto/spanner.proto b/google/cloud/spanner_v1/proto/spanner.proto index b4877cf919..c436227221 100644 --- a/google/cloud/spanner_v1/proto/spanner.proto +++ b/google/cloud/spanner_v1/proto/spanner.proto @@ -459,6 +459,27 @@ message RequestOptions { // Priority for the request. Priority priority = 1; + + // A per-request tag which can be applied to queries or reads, used for + // statistics collection. 
+ // Both request_tag and transaction_tag can be specified for a read or query + // that belongs to a transaction. + // This field is ignored for requests where it's not applicable (e.g. + // CommitRequest). + // `request_tag` must be a valid identifier of the form: + // `[a-zA-Z][a-zA-Z0-9_\-]` between 2 and 64 characters in length + string request_tag = 2; + + // A tag used for statistics collection about this transaction. + // Both request_tag and transaction_tag can be specified for a read or query + // that belongs to a transaction. + // The value of transaction_tag should be the same for all requests belonging + // to the same transaction. + // If this request doesn’t belong to any transaction, transaction_tag will be + // ignored. + // `transaction_tag` must be a valid identifier of the format: + // `[a-zA-Z][a-zA-Z0-9_\-]{0,49}` + string transaction_tag = 3; } // The request for [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and @@ -490,15 +511,15 @@ message ExecuteSqlRequest { // The `optimizer_version` statement hint has precedence over this setting. string optimizer_version = 1; - // Query optimizer statistics package to use. + // An option to control the selection of optimizer statistics package. // // This parameter allows individual queries to use a different query - // optimizer statistics. + // optimizer statistics package. // // Specifying `latest` as a value instructs Cloud Spanner to use the latest // generated statistics package. If not specified, Cloud Spanner uses - // statistics package set at the database level options, or latest if - // the database option is not set. + // the statistics package set at the database level options, or the latest + // package if the database option is not set. // // The statistics package requested by the query has to be exempt from // garbage collection. This can be achieved with the following DDL @@ -509,10 +530,10 @@ message ExecuteSqlRequest { // ``` // // The list of available statistics packages can be queried from - // `SPANNER_SYS.OPTIMIZER_STATISTICS_PACKAGES`. + // `INFORMATION_SCHEMA.SPANNER_STATISTICS`. // // Executing a SQL statement with an invalid optimizer statistics package - // or with statistics package that allows garbage collection fails with + // or with a statistics package that allows garbage collection fails with // an `INVALID_ARGUMENT` error. string optimizer_statistics_package = 2; } diff --git a/google/cloud/spanner_v1/types/spanner.py b/google/cloud/spanner_v1/types/spanner.py index 5ced795ef4..acb32c8ff9 100644 --- a/google/cloud/spanner_v1/types/spanner.py +++ b/google/cloud/spanner_v1/types/spanner.py @@ -247,6 +247,24 @@ class RequestOptions(proto.Message): Attributes: priority (google.cloud.spanner_v1.types.RequestOptions.Priority): Priority for the request. + request_tag (str): + A per-request tag which can be applied to queries or reads, + used for statistics collection. Both request_tag and + transaction_tag can be specified for a read or query that + belongs to a transaction. This field is ignored for requests + where it's not applicable (e.g. CommitRequest). + ``request_tag`` must be a valid identifier of the form: + ``[a-zA-Z][a-zA-Z0-9_\-]`` between 2 and 64 characters in + length + transaction_tag (str): + A tag used for statistics collection about this transaction. + Both request_tag and transaction_tag can be specified for a + read or query that belongs to a transaction. The value of + transaction_tag should be the same for all requests + belonging to the same transaction. 
If this request doesn’t + belong to any transaction, transaction_tag will be ignored. + ``transaction_tag`` must be a valid identifier of the + format: ``[a-zA-Z][a-zA-Z0-9_\-]{0,49}`` """ class Priority(proto.Enum): @@ -275,6 +293,10 @@ class Priority(proto.Enum): priority = proto.Field(proto.ENUM, number=1, enum=Priority,) + request_tag = proto.Field(proto.STRING, number=2) + + transaction_tag = proto.Field(proto.STRING, number=3) + class ExecuteSqlRequest(proto.Message): r"""The request for [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] @@ -411,16 +433,17 @@ class QueryOptions(proto.Message): The ``optimizer_version`` statement hint has precedence over this setting. optimizer_statistics_package (str): - Query optimizer statistics package to use. + An option to control the selection of optimizer statistics + package. This parameter allows individual queries to use a different - query optimizer statistics. + query optimizer statistics package. Specifying ``latest`` as a value instructs Cloud Spanner to use the latest generated statistics package. If not - specified, Cloud Spanner uses statistics package set at the - database level options, or latest if the database option is - not set. + specified, Cloud Spanner uses the statistics package set at + the database level options, or the latest package if the + database option is not set. The statistics package requested by the query has to be exempt from garbage collection. This can be achieved with @@ -431,10 +454,10 @@ class QueryOptions(proto.Message): ALTER STATISTICS SET OPTIONS (allow_gc=false) The list of available statistics packages can be queried - from ``SPANNER_SYS.OPTIMIZER_STATISTICS_PACKAGES``. + from ``INFORMATION_SCHEMA.SPANNER_STATISTICS``. Executing a SQL statement with an invalid optimizer - statistics package or with statistics package that allows + statistics package or with a statistics package that allows garbage collection fails with an ``INVALID_ARGUMENT`` error. """ diff --git a/synth.metadata b/synth.metadata index 4a63d02d2d..9b53419d72 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,8 +11,8 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "6598bb829c9e9a534be674649ffd1b4671a821f9", - "internalRef": "364449524" + "sha": "6ce40ff8faf68226782f507ca6b2d497a77044de", + "internalRef": "365498709" } }, { From 5f8f3414f39e26d91e479fdfeab42dabbb40e819 Mon Sep 17 00:00:00 2001 From: Bu Sun Kim Date: Tue, 30 Mar 2021 17:11:10 +0000 Subject: [PATCH 08/10] fix: fix dependencies --- UPGRADING.md | 44 ++++++++++++++++++------------------- noxfile.py | 39 ++++++++++++++++++++++++++------ setup.py | 6 ++--- testing/constraints-3.6.txt | 4 ++-- 4 files changed, 59 insertions(+), 34 deletions(-) diff --git a/UPGRADING.md b/UPGRADING.md index e90f2141bf..1a0bdfe19a 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -14,13 +14,13 @@ limitations under the License. # 2.0.0 Migration Guide -The 2.0 release of the `google-cloud-spanner` client is a significant update based on a +The 2.0 release of the `google-cloud-spanner` client is a significant update based on a [next-gen code generator](https://github.com/googleapis/gapic-generator-python). -It drops support for Python versions below 3.6. +It drops support for Python versions below 3.6. The handwritten client surfaces have minor changes which may require minimal updates to existing user code. -The generated client surfaces have substantial interface changes. 
Existing user code which uses these surfaces directly +The generated client surfaces have substantial interface changes. Existing user code which uses these surfaces directly will require significant updates to use this version. This document describes the changes that have been made, and what you need to do to update your usage. @@ -89,7 +89,7 @@ for database_pb in instance.list_databases(): > **WARNING**: Breaking change The library now handles pages for the user. Previously, the library would return a page generator which required a user -to then iterate over each page to get the resource. Now, the library handles iterating over the pages and only returns +to then iterate over each page to get the resource. Now, the library handles iterating over the pages and only returns the resource protos. **Before:** @@ -176,14 +176,14 @@ for database_pb in instance.list_databases(): Methods expect request objects. We provide scripts that will convert most common use cases. -* Install the library +* Install the library with `libcst`. ```py -python3 -m pip install google-cloud-spanner +python3 -m pip install google-cloud-spanner[libcst] ``` * The scripts `fixup_spanner_v1_keywords.py`, `fixup_spanner_admin_database_v1_keywords.py`, and -`fixup_spanner_admin_instance_v1_keywords.py` are shipped with the library. They expect an input directory (with the +`fixup_spanner_admin_instance_v1_keywords.py` are shipped with the library. They expect an input directory (with the code to convert) and an empty destination directory. ```sh @@ -194,10 +194,10 @@ $ fixup_spanner_v1_keywords.py --input-directory .samples/ --output-directory sa >the handwritten surfaces e.g. `client.list_instances()` #### More details - + In `google-cloud-spanner<2.0.0`, parameters required by the API were positional parameters and optional parameters were keyword parameters. - + **Before:** ```py def list_instances( @@ -210,14 +210,14 @@ def list_instances( metadata=None, ): ``` - - In the 2.0.0 release, all methods have a single positional parameter `request`. Method docstrings indicate whether a + + In the 2.0.0 release, all methods have a single positional parameter `request`. Method docstrings indicate whether a parameter is required or optional. - - Some methods have additional keyword only parameters. The available parameters depend on the + + Some methods have additional keyword only parameters. The available parameters depend on the [`google.api.method_signature` annotation](https://github.com/googleapis/googleapis/blob/master/google/spanner/admin/instance/v1/spanner_instance_admin.proto#L86) specified by the API producer. - - + + **After:** ```py def list_instances( @@ -230,13 +230,13 @@ def list_instances( metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListInstancesPager: ``` - + > **NOTE:** The `request` parameter and flattened keyword parameters for the API are mutually exclusive. > Passing both will result in an error. - - + + Both of these calls are valid: - + ```py response = client.list_instances( request={ @@ -244,16 +244,16 @@ def list_instances( } ) ``` - + ```py response = client.execute_sql( parent=project_name, ) ``` - + This call is invalid because it mixes `request` with a keyword argument `parent`. Executing this code will result in an error. 
- + ```py response = client.execute_sql( request={}, diff --git a/noxfile.py b/noxfile.py index 1a6227824a..14442a4442 100644 --- a/noxfile.py +++ b/noxfile.py @@ -18,6 +18,7 @@ from __future__ import absolute_import import os +import pathlib import shutil import nox @@ -28,7 +29,23 @@ DEFAULT_PYTHON_VERSION = "3.8" SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] -UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8"] +UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] + +CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() + +# 'docfx' is excluded since it only needs to run in 'docs-presubmit' +nox.options.sessions = [ + "unit", + "system", + "cover", + "lint", + "lint_setup_py", + "blacken", + "docs", +] + +# Error if a python version is missing +nox.options.error_on_missing_interpreters = True @nox.session(python=DEFAULT_PYTHON_VERSION) @@ -70,10 +87,15 @@ def lint_setup_py(session): def default(session): # Install all test dependencies, then install this package in-place. - session.install("asyncmock", "pytest-asyncio") - session.install("mock", "pytest", "pytest-cov") - session.install("-e", ".") + constraints_path = str( + CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" + ) + session.install("asyncmock", "pytest-asyncio", "-c", constraints_path) + + session.install("mock", "pytest", "pytest-cov", "-c", constraints_path) + + session.install("-e", ".", "-c", constraints_path) # Run py.test against the unit tests. session.run( @@ -90,7 +112,7 @@ def default(session): *session.posargs, ) - session.install("-e", ".[tracing]") + session.install("-e", ".[tracing]", "-c", constraints_path) # Run py.test against the unit tests with OpenTelemetry. session.run( @@ -117,6 +139,9 @@ def unit(session): @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) def system(session): """Run the system test suite.""" + constraints_path = str( + CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" + ) system_test_path = os.path.join("tests", "system.py") system_test_folder_path = os.path.join("tests", "system") @@ -143,9 +168,9 @@ def system(session): # Install all test dependencies, then install this package into the # virtualenv's dist-packages. session.install( - "mock", "pytest", "google-cloud-testutils", + "mock", "pytest", "google-cloud-testutils", "-c", constraints_path ) - session.install("-e", ".[tracing]") + session.install("-e", ".[tracing]", "-c", constraints_path) # Run py.test against the system tests. 
    if system_test_exists:
diff --git a/setup.py b/setup.py
index b414510211..888635b08e 100644
--- a/setup.py
+++ b/setup.py
@@ -29,10 +29,9 @@
 # 'Development Status :: 5 - Production/Stable'
 release_status = "Development Status :: 5 - Production/Stable"
 dependencies = [
-    "google-api-core[grpc] >= 1.22.0, < 2.0.0dev",
+    "google-api-core[grpc] >= 1.22.2, < 2.0.0dev",
     "google-cloud-core >= 1.4.1, < 2.0dev",
     "grpc-google-iam-v1 >= 0.12.3, < 0.13dev",
-    "libcst >= 0.2.5",
     "proto-plus >= 1.11.0",
     "sqlparse >= 0.3.0",
 ]
@@ -41,7 +40,8 @@
         "opentelemetry-api >= 0.11b0",
         "opentelemetry-sdk >= 0.11b0",
         "opentelemetry-instrumentation >= 0.11b0",
-    ]
+    ],
+    "libcst": "libcst >= 0.2.5"
 }
diff --git a/testing/constraints-3.6.txt b/testing/constraints-3.6.txt
index 050e9c7a18..bfb81c38a2 100644
--- a/testing/constraints-3.6.txt
+++ b/testing/constraints-3.6.txt
@@ -5,7 +5,7 @@
 #
 # e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev",
 # Then this file should have foo==1.14.0
-google-api-core==1.22.0
+google-api-core==1.22.2
 google-cloud-core==1.4.1
 grpc-google-iam-v1==0.12.3
 libcst==0.2.5
@@ -13,4 +13,4 @@ proto-plus==1.13.0
 sqlparse==0.3.0
 opentelemetry-api==0.11b0
 opentelemetry-sdk==0.11b0
-opentelemetry-instrumentation==0.11b0
\ No newline at end of file
+opentelemetry-instrumentation==0.11b0

From 18b99572e10ef8c24fbf42dddaf62b9a005dab5b Mon Sep 17 00:00:00 2001
From: larkee
Date: Tue, 6 Apr 2021 17:34:51 +1000
Subject: [PATCH 09/10] chore: revert changes that break tests

---
 .gitignore                                   |   1 -
 docs/spanner_admin_database_v1/types.rst     |   1 -
 docs/spanner_admin_instance_v1/types.rst     |   1 -
 docs/spanner_v1/types.rst                    |   1 -
 .../cloud/spanner_v1/proto/transaction.proto | 279 +---------------
 google/cloud/spanner_v1/types/transaction.py | 303 +-----------------
 6 files changed, 5 insertions(+), 581 deletions(-)

diff --git a/.gitignore b/.gitignore
index b4243ced74..708cdcc9eb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -45,7 +45,6 @@ pip-log.txt

 # Built documentation
 docs/_build
-bigquery/docs/generated
 docs.metadata

 # Virtual environment
diff --git a/docs/spanner_admin_database_v1/types.rst b/docs/spanner_admin_database_v1/types.rst
index 95e1d7f88b..fe6c27778b 100644
--- a/docs/spanner_admin_database_v1/types.rst
+++ b/docs/spanner_admin_database_v1/types.rst
@@ -3,5 +3,4 @@ Types for Google Cloud Spanner Admin Database v1 API

 .. automodule:: google.cloud.spanner_admin_database_v1.types
     :members:
-    :undoc-members:
     :show-inheritance:
diff --git a/docs/spanner_admin_instance_v1/types.rst b/docs/spanner_admin_instance_v1/types.rst
index 8f7204ebce..250cf6bf9b 100644
--- a/docs/spanner_admin_instance_v1/types.rst
+++ b/docs/spanner_admin_instance_v1/types.rst
@@ -3,5 +3,4 @@ Types for Google Cloud Spanner Admin Instance v1 API

 .. automodule:: google.cloud.spanner_admin_instance_v1.types
     :members:
-    :undoc-members:
     :show-inheritance:
diff --git a/docs/spanner_v1/types.rst b/docs/spanner_v1/types.rst
index 8678aba188..c7ff7e6c71 100644
--- a/docs/spanner_v1/types.rst
+++ b/docs/spanner_v1/types.rst
@@ -3,5 +3,4 @@ Types for Google Cloud Spanner v1 API

 ..
automodule:: google.cloud.spanner_v1.types :members: - :undoc-members: :show-inheritance: diff --git a/google/cloud/spanner_v1/proto/transaction.proto b/google/cloud/spanner_v1/proto/transaction.proto index 30ef9dc84a..7082c56258 100644 --- a/google/cloud/spanner_v1/proto/transaction.proto +++ b/google/cloud/spanner_v1/proto/transaction.proto @@ -28,284 +28,9 @@ option java_package = "com.google.spanner.v1"; option php_namespace = "Google\\Cloud\\Spanner\\V1"; option ruby_package = "Google::Cloud::Spanner::V1"; -// # Transactions +// TransactionOptions are used to specify different types of transactions. // -// -// Each session can have at most one active transaction at a time (note that -// standalone reads and queries use a transaction internally and do count -// towards the one transaction limit). After the active transaction is -// completed, the session can immediately be re-used for the next transaction. -// It is not necessary to create a new session for each transaction. -// -// # Transaction Modes -// -// Cloud Spanner supports three transaction modes: -// -// 1. Locking read-write. This type of transaction is the only way -// to write data into Cloud Spanner. These transactions rely on -// pessimistic locking and, if necessary, two-phase commit. -// Locking read-write transactions may abort, requiring the -// application to retry. -// -// 2. Snapshot read-only. This transaction type provides guaranteed -// consistency across several reads, but does not allow -// writes. Snapshot read-only transactions can be configured to -// read at timestamps in the past. Snapshot read-only -// transactions do not need to be committed. -// -// 3. Partitioned DML. This type of transaction is used to execute -// a single Partitioned DML statement. Partitioned DML partitions -// the key space and runs the DML statement over each partition -// in parallel using separate, internal transactions that commit -// independently. Partitioned DML transactions do not need to be -// committed. -// -// For transactions that only read, snapshot read-only transactions -// provide simpler semantics and are almost always faster. In -// particular, read-only transactions do not take locks, so they do -// not conflict with read-write transactions. As a consequence of not -// taking locks, they also do not abort, so retry loops are not needed. -// -// Transactions may only read/write data in a single database. They -// may, however, read/write data in different tables within that -// database. -// -// ## Locking Read-Write Transactions -// -// Locking transactions may be used to atomically read-modify-write -// data anywhere in a database. This type of transaction is externally -// consistent. -// -// Clients should attempt to minimize the amount of time a transaction -// is active. Faster transactions commit with higher probability -// and cause less contention. Cloud Spanner attempts to keep read locks -// active as long as the transaction continues to do reads, and the -// transaction has not been terminated by -// [Commit][google.spanner.v1.Spanner.Commit] or -// [Rollback][google.spanner.v1.Spanner.Rollback]. Long periods of -// inactivity at the client may cause Cloud Spanner to release a -// transaction's locks and abort it. -// -// Conceptually, a read-write transaction consists of zero or more -// reads or SQL statements followed by -// [Commit][google.spanner.v1.Spanner.Commit]. 
At any time before -// [Commit][google.spanner.v1.Spanner.Commit], the client can send a -// [Rollback][google.spanner.v1.Spanner.Rollback] request to abort the -// transaction. -// -// ## Semantics -// -// Cloud Spanner can commit the transaction if all read locks it acquired -// are still valid at commit time, and it is able to acquire write -// locks for all writes. Cloud Spanner can abort the transaction for any -// reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees -// that the transaction has not modified any user data in Cloud Spanner. -// -// Unless the transaction commits, Cloud Spanner makes no guarantees about -// how long the transaction's locks were held for. It is an error to -// use Cloud Spanner locks for any sort of mutual exclusion other than -// between Cloud Spanner transactions themselves. -// -// ## Retrying Aborted Transactions -// -// When a transaction aborts, the application can choose to retry the -// whole transaction again. To maximize the chances of successfully -// committing the retry, the client should execute the retry in the -// same session as the original attempt. The original session's lock -// priority increases with each consecutive abort, meaning that each -// attempt has a slightly better chance of success than the previous. -// -// Under some circumstances (e.g., many transactions attempting to -// modify the same row(s)), a transaction can abort many times in a -// short period before successfully committing. Thus, it is not a good -// idea to cap the number of retries a transaction can attempt; -// instead, it is better to limit the total amount of wall time spent -// retrying. -// -// ## Idle Transactions -// -// A transaction is considered idle if it has no outstanding reads or -// SQL queries and has not started a read or SQL query within the last 10 -// seconds. Idle transactions can be aborted by Cloud Spanner so that they -// don't hold on to locks indefinitely. In that case, the commit will -// fail with error `ABORTED`. -// -// If this behavior is undesirable, periodically executing a simple -// SQL query in the transaction (e.g., `SELECT 1`) prevents the -// transaction from becoming idle. -// -// ## Snapshot Read-Only Transactions -// -// Snapshot read-only transactions provides a simpler method than -// locking read-write transactions for doing several consistent -// reads. However, this type of transaction does not support writes. -// -// Snapshot transactions do not take locks. Instead, they work by -// choosing a Cloud Spanner timestamp, then executing all reads at that -// timestamp. Since they do not acquire locks, they do not block -// concurrent read-write transactions. -// -// Unlike locking read-write transactions, snapshot read-only -// transactions never abort. They can fail if the chosen read -// timestamp is garbage collected; however, the default garbage -// collection policy is generous enough that most applications do not -// need to worry about this in practice. -// -// Snapshot read-only transactions do not need to call -// [Commit][google.spanner.v1.Spanner.Commit] or -// [Rollback][google.spanner.v1.Spanner.Rollback] (and in fact are not -// permitted to do so). -// -// To execute a snapshot transaction, the client specifies a timestamp -// bound, which tells Cloud Spanner how to choose a read timestamp. -// -// The types of timestamp bound are: -// -// - Strong (the default). -// - Bounded staleness. -// - Exact staleness. 
-// -// If the Cloud Spanner database to be read is geographically distributed, -// stale read-only transactions can execute more quickly than strong -// or read-write transaction, because they are able to execute far -// from the leader replica. -// -// Each type of timestamp bound is discussed in detail below. -// -// ## Strong -// -// Strong reads are guaranteed to see the effects of all transactions -// that have committed before the start of the read. Furthermore, all -// rows yielded by a single read are consistent with each other -- if -// any part of the read observes a transaction, all parts of the read -// see the transaction. -// -// Strong reads are not repeatable: two consecutive strong read-only -// transactions might return inconsistent results if there are -// concurrent writes. If consistency across reads is required, the -// reads should be executed within a transaction or at an exact read -// timestamp. -// -// See [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong]. -// -// ## Exact Staleness -// -// These timestamp bounds execute reads at a user-specified -// timestamp. Reads at a timestamp are guaranteed to see a consistent -// prefix of the global transaction history: they observe -// modifications done by all transactions with a commit timestamp <= -// the read timestamp, and observe none of the modifications done by -// transactions with a larger commit timestamp. They will block until -// all conflicting transactions that may be assigned commit timestamps -// <= the read timestamp have finished. -// -// The timestamp can either be expressed as an absolute Cloud Spanner commit -// timestamp or a staleness relative to the current time. -// -// These modes do not require a "negotiation phase" to pick a -// timestamp. As a result, they execute slightly faster than the -// equivalent boundedly stale concurrency modes. On the other hand, -// boundedly stale reads usually return fresher results. -// -// See [TransactionOptions.ReadOnly.read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.read_timestamp] and -// [TransactionOptions.ReadOnly.exact_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact_staleness]. -// -// ## Bounded Staleness -// -// Bounded staleness modes allow Cloud Spanner to pick the read timestamp, -// subject to a user-provided staleness bound. Cloud Spanner chooses the -// newest timestamp within the staleness bound that allows execution -// of the reads at the closest available replica without blocking. -// -// All rows yielded are consistent with each other -- if any part of -// the read observes a transaction, all parts of the read see the -// transaction. Boundedly stale reads are not repeatable: two stale -// reads, even if they use the same staleness bound, can execute at -// different timestamps and thus return inconsistent results. -// -// Boundedly stale reads execute in two phases: the first phase -// negotiates a timestamp among all replicas needed to serve the -// read. In the second phase, reads are executed at the negotiated -// timestamp. -// -// As a result of the two phase execution, bounded staleness reads are -// usually a little slower than comparable exact staleness -// reads. However, they are typically able to return fresher -// results, and are more likely to execute at the closest replica. -// -// Because the timestamp negotiation requires up-front knowledge of -// which rows will be read, it can only be used with single-use -// read-only transactions. 
-// -// See [TransactionOptions.ReadOnly.max_staleness][google.spanner.v1.TransactionOptions.ReadOnly.max_staleness] and -// [TransactionOptions.ReadOnly.min_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min_read_timestamp]. -// -// ## Old Read Timestamps and Garbage Collection -// -// Cloud Spanner continuously garbage collects deleted and overwritten data -// in the background to reclaim storage space. This process is known -// as "version GC". By default, version GC reclaims versions after they -// are one hour old. Because of this, Cloud Spanner cannot perform reads -// at read timestamps more than one hour in the past. This -// restriction also applies to in-progress reads and/or SQL queries whose -// timestamp become too old while executing. Reads and SQL queries with -// too-old read timestamps fail with the error `FAILED_PRECONDITION`. -// -// ## Partitioned DML Transactions -// -// Partitioned DML transactions are used to execute DML statements with a -// different execution strategy that provides different, and often better, -// scalability properties for large, table-wide operations than DML in a -// ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, -// should prefer using ReadWrite transactions. -// -// Partitioned DML partitions the keyspace and runs the DML statement on each -// partition in separate, internal transactions. These transactions commit -// automatically when complete, and run independently from one another. -// -// To reduce lock contention, this execution strategy only acquires read locks -// on rows that match the WHERE clause of the statement. Additionally, the -// smaller per-partition transactions hold locks for less time. -// -// That said, Partitioned DML is not a drop-in replacement for standard DML used -// in ReadWrite transactions. -// -// - The DML statement must be fully-partitionable. Specifically, the statement -// must be expressible as the union of many statements which each access only -// a single row of the table. -// -// - The statement is not applied atomically to all rows of the table. Rather, -// the statement is applied atomically to partitions of the table, in -// independent transactions. Secondary index rows are updated atomically -// with the base table rows. -// -// - Partitioned DML does not guarantee exactly-once execution semantics -// against a partition. The statement will be applied at least once to each -// partition. It is strongly recommended that the DML statement should be -// idempotent to avoid unexpected results. For instance, it is potentially -// dangerous to run a statement such as -// `UPDATE table SET column = column + 1` as it could be run multiple times -// against some rows. -// -// - The partitions are committed automatically - there is no support for -// Commit or Rollback. If the call returns an error, or if the client issuing -// the ExecuteSql call dies, it is possible that some rows had the statement -// executed on them successfully. It is also possible that statement was -// never executed against other rows. -// -// - Partitioned DML transactions may only contain the execution of a single -// DML statement via ExecuteSql or ExecuteStreamingSql. -// -// - If any error is encountered during the execution of the partitioned DML -// operation (for instance, a UNIQUE INDEX violation, division by zero, or a -// value that cannot be stored due to schema constraints), then the -// operation is stopped at that point and an error is returned. 
It is -// possible that at this point, some partitions have been committed (or even -// committed multiple times), and other partitions have not been run at all. -// -// Given the above, Partitioned DML is good fit for large, database-wide, -// operations that are idempotent, such as deleting old rows from a very large -// table. +// For more info, see: https://cloud.google.com/spanner/docs/reference/rest/v1/Transaction message TransactionOptions { // Message type to initiate a read-write transaction. Currently this // transaction type has no options. diff --git a/google/cloud/spanner_v1/types/transaction.py b/google/cloud/spanner_v1/types/transaction.py index ddda88b384..e20c6ad7b4 100644 --- a/google/cloud/spanner_v1/types/transaction.py +++ b/google/cloud/spanner_v1/types/transaction.py @@ -29,306 +29,9 @@ class TransactionOptions(proto.Message): - r"""Transactions - ============ - - Each session can have at most one active transaction at a time (note - that standalone reads and queries use a transaction internally and - do count towards the one transaction limit). After the active - transaction is completed, the session can immediately be re-used for - the next transaction. It is not necessary to create a new session - for each transaction. - - Transaction Modes - ================= - - Cloud Spanner supports three transaction modes: - - 1. Locking read-write. This type of transaction is the only way to - write data into Cloud Spanner. These transactions rely on - pessimistic locking and, if necessary, two-phase commit. Locking - read-write transactions may abort, requiring the application to - retry. - - 2. Snapshot read-only. This transaction type provides guaranteed - consistency across several reads, but does not allow writes. - Snapshot read-only transactions can be configured to read at - timestamps in the past. Snapshot read-only transactions do not - need to be committed. - - 3. Partitioned DML. This type of transaction is used to execute a - single Partitioned DML statement. Partitioned DML partitions the - key space and runs the DML statement over each partition in - parallel using separate, internal transactions that commit - independently. Partitioned DML transactions do not need to be - committed. - - For transactions that only read, snapshot read-only transactions - provide simpler semantics and are almost always faster. In - particular, read-only transactions do not take locks, so they do not - conflict with read-write transactions. As a consequence of not - taking locks, they also do not abort, so retry loops are not needed. - - Transactions may only read/write data in a single database. They - may, however, read/write data in different tables within that - database. - - Locking Read-Write Transactions - ------------------------------- - - Locking transactions may be used to atomically read-modify-write - data anywhere in a database. This type of transaction is externally - consistent. - - Clients should attempt to minimize the amount of time a transaction - is active. Faster transactions commit with higher probability and - cause less contention. Cloud Spanner attempts to keep read locks - active as long as the transaction continues to do reads, and the - transaction has not been terminated by - [Commit][google.spanner.v1.Spanner.Commit] or - [Rollback][google.spanner.v1.Spanner.Rollback]. Long periods of - inactivity at the client may cause Cloud Spanner to release a - transaction's locks and abort it. 
- - Conceptually, a read-write transaction consists of zero or more - reads or SQL statements followed by - [Commit][google.spanner.v1.Spanner.Commit]. At any time before - [Commit][google.spanner.v1.Spanner.Commit], the client can send a - [Rollback][google.spanner.v1.Spanner.Rollback] request to abort the - transaction. - - Semantics - --------- - - Cloud Spanner can commit the transaction if all read locks it - acquired are still valid at commit time, and it is able to acquire - write locks for all writes. Cloud Spanner can abort the transaction - for any reason. If a commit attempt returns ``ABORTED``, Cloud - Spanner guarantees that the transaction has not modified any user - data in Cloud Spanner. - - Unless the transaction commits, Cloud Spanner makes no guarantees - about how long the transaction's locks were held for. It is an error - to use Cloud Spanner locks for any sort of mutual exclusion other - than between Cloud Spanner transactions themselves. - - Retrying Aborted Transactions - ----------------------------- - - When a transaction aborts, the application can choose to retry the - whole transaction again. To maximize the chances of successfully - committing the retry, the client should execute the retry in the - same session as the original attempt. The original session's lock - priority increases with each consecutive abort, meaning that each - attempt has a slightly better chance of success than the previous. - - Under some circumstances (e.g., many transactions attempting to - modify the same row(s)), a transaction can abort many times in a - short period before successfully committing. Thus, it is not a good - idea to cap the number of retries a transaction can attempt; - instead, it is better to limit the total amount of wall time spent - retrying. - - Idle Transactions - ----------------- - - A transaction is considered idle if it has no outstanding reads or - SQL queries and has not started a read or SQL query within the last - 10 seconds. Idle transactions can be aborted by Cloud Spanner so - that they don't hold on to locks indefinitely. In that case, the - commit will fail with error ``ABORTED``. - - If this behavior is undesirable, periodically executing a simple SQL - query in the transaction (e.g., ``SELECT 1``) prevents the - transaction from becoming idle. - - Snapshot Read-Only Transactions - ------------------------------- - - Snapshot read-only transactions provides a simpler method than - locking read-write transactions for doing several consistent reads. - However, this type of transaction does not support writes. - - Snapshot transactions do not take locks. Instead, they work by - choosing a Cloud Spanner timestamp, then executing all reads at that - timestamp. Since they do not acquire locks, they do not block - concurrent read-write transactions. - - Unlike locking read-write transactions, snapshot read-only - transactions never abort. They can fail if the chosen read timestamp - is garbage collected; however, the default garbage collection policy - is generous enough that most applications do not need to worry about - this in practice. - - Snapshot read-only transactions do not need to call - [Commit][google.spanner.v1.Spanner.Commit] or - [Rollback][google.spanner.v1.Spanner.Rollback] (and in fact are not - permitted to do so). - - To execute a snapshot transaction, the client specifies a timestamp - bound, which tells Cloud Spanner how to choose a read timestamp. - - The types of timestamp bound are: - - - Strong (the default). 
- - Bounded staleness. - - Exact staleness. - - If the Cloud Spanner database to be read is geographically - distributed, stale read-only transactions can execute more quickly - than strong or read-write transaction, because they are able to - execute far from the leader replica. - - Each type of timestamp bound is discussed in detail below. - - Strong - ------ - - Strong reads are guaranteed to see the effects of all transactions - that have committed before the start of the read. Furthermore, all - rows yielded by a single read are consistent with each other -- if - any part of the read observes a transaction, all parts of the read - see the transaction. - - Strong reads are not repeatable: two consecutive strong read-only - transactions might return inconsistent results if there are - concurrent writes. If consistency across reads is required, the - reads should be executed within a transaction or at an exact read - timestamp. - - See - [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong]. - - Exact Staleness - --------------- - - These timestamp bounds execute reads at a user-specified timestamp. - Reads at a timestamp are guaranteed to see a consistent prefix of - the global transaction history: they observe modifications done by - all transactions with a commit timestamp <= the read timestamp, and - observe none of the modifications done by transactions with a larger - commit timestamp. They will block until all conflicting transactions - that may be assigned commit timestamps <= the read timestamp have - finished. - - The timestamp can either be expressed as an absolute Cloud Spanner - commit timestamp or a staleness relative to the current time. - - These modes do not require a "negotiation phase" to pick a - timestamp. As a result, they execute slightly faster than the - equivalent boundedly stale concurrency modes. On the other hand, - boundedly stale reads usually return fresher results. - - See - [TransactionOptions.ReadOnly.read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.read_timestamp] - and - [TransactionOptions.ReadOnly.exact_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact_staleness]. - - Bounded Staleness - ----------------- - - Bounded staleness modes allow Cloud Spanner to pick the read - timestamp, subject to a user-provided staleness bound. Cloud Spanner - chooses the newest timestamp within the staleness bound that allows - execution of the reads at the closest available replica without - blocking. - - All rows yielded are consistent with each other -- if any part of - the read observes a transaction, all parts of the read see the - transaction. Boundedly stale reads are not repeatable: two stale - reads, even if they use the same staleness bound, can execute at - different timestamps and thus return inconsistent results. - - Boundedly stale reads execute in two phases: the first phase - negotiates a timestamp among all replicas needed to serve the read. - In the second phase, reads are executed at the negotiated timestamp. - - As a result of the two phase execution, bounded staleness reads are - usually a little slower than comparable exact staleness reads. - However, they are typically able to return fresher results, and are - more likely to execute at the closest replica. - - Because the timestamp negotiation requires up-front knowledge of - which rows will be read, it can only be used with single-use - read-only transactions. 
- - See - [TransactionOptions.ReadOnly.max_staleness][google.spanner.v1.TransactionOptions.ReadOnly.max_staleness] - and - [TransactionOptions.ReadOnly.min_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min_read_timestamp]. - - Old Read Timestamps and Garbage Collection - ------------------------------------------ - - Cloud Spanner continuously garbage collects deleted and overwritten - data in the background to reclaim storage space. This process is - known as "version GC". By default, version GC reclaims versions - after they are one hour old. Because of this, Cloud Spanner cannot - perform reads at read timestamps more than one hour in the past. - This restriction also applies to in-progress reads and/or SQL - queries whose timestamp become too old while executing. Reads and - SQL queries with too-old read timestamps fail with the error - ``FAILED_PRECONDITION``. - - Partitioned DML Transactions - ---------------------------- - - Partitioned DML transactions are used to execute DML statements with - a different execution strategy that provides different, and often - better, scalability properties for large, table-wide operations than - DML in a ReadWrite transaction. Smaller scoped statements, such as - an OLTP workload, should prefer using ReadWrite transactions. - - Partitioned DML partitions the keyspace and runs the DML statement - on each partition in separate, internal transactions. These - transactions commit automatically when complete, and run - independently from one another. - - To reduce lock contention, this execution strategy only acquires - read locks on rows that match the WHERE clause of the statement. - Additionally, the smaller per-partition transactions hold locks for - less time. - - That said, Partitioned DML is not a drop-in replacement for standard - DML used in ReadWrite transactions. - - - The DML statement must be fully-partitionable. Specifically, the - statement must be expressible as the union of many statements - which each access only a single row of the table. - - - The statement is not applied atomically to all rows of the table. - Rather, the statement is applied atomically to partitions of the - table, in independent transactions. Secondary index rows are - updated atomically with the base table rows. - - - Partitioned DML does not guarantee exactly-once execution - semantics against a partition. The statement will be applied at - least once to each partition. It is strongly recommended that the - DML statement should be idempotent to avoid unexpected results. - For instance, it is potentially dangerous to run a statement such - as ``UPDATE table SET column = column + 1`` as it could be run - multiple times against some rows. - - - The partitions are committed automatically - there is no support - for Commit or Rollback. If the call returns an error, or if the - client issuing the ExecuteSql call dies, it is possible that some - rows had the statement executed on them successfully. It is also - possible that statement was never executed against other rows. - - - Partitioned DML transactions may only contain the execution of a - single DML statement via ExecuteSql or ExecuteStreamingSql. - - - If any error is encountered during the execution of the - partitioned DML operation (for instance, a UNIQUE INDEX - violation, division by zero, or a value that cannot be stored due - to schema constraints), then the operation is stopped at that - point and an error is returned. 
It is possible that at this - point, some partitions have been committed (or even committed - multiple times), and other partitions have not been run at all. - - Given the above, Partitioned DML is good fit for large, - database-wide, operations that are idempotent, such as deleting old - rows from a very large table. + r"""TransactionOptions are used to specify different types of transactions. + + For more info, see: https://cloud.google.com/spanner/docs/reference/rest/v1/Transaction Attributes: read_write (google.cloud.spanner_v1.types.TransactionOptions.ReadWrite): From a1743adc61da3bd38d5a3c5a917f135a5cd36751 Mon Sep 17 00:00:00 2001 From: larkee Date: Tue, 6 Apr 2021 17:38:47 +1000 Subject: [PATCH 10/10] style: fix lint --- noxfile.py | 4 +--- setup.py | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/noxfile.py b/noxfile.py index 14442a4442..350612fdd5 100644 --- a/noxfile.py +++ b/noxfile.py @@ -167,9 +167,7 @@ def system(session): # Install all test dependencies, then install this package into the # virtualenv's dist-packages. - session.install( - "mock", "pytest", "google-cloud-testutils", "-c", constraints_path - ) + session.install("mock", "pytest", "google-cloud-testutils", "-c", constraints_path) session.install("-e", ".[tracing]", "-c", constraints_path) # Run py.test against the system tests. diff --git a/setup.py b/setup.py index 888635b08e..086073df4f 100644 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ "opentelemetry-sdk >= 0.11b0", "opentelemetry-instrumentation >= 0.11b0", ], - "libcst": "libcst >= 0.2.5" + "libcst": "libcst >= 0.2.5", }